author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 02:27:48 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 02:27:48 +0300
commit    | 8bf1a529cd664c8e5268381f1e24fe67aa611dd3 (patch)
tree      | c5cec84941923778e4e2ec5a5d65e2fc8ea71b58 /arch/arm64
parent    | b327dfe05258e09c8db6e1e091c2e6d84dd426a6 (diff)
parent    | d54170812ef1c80e0fa3ed3e554a0bbfc2920d9d (diff)
download  | linux-8bf1a529cd664c8e5268381f1e24fe67aa611dd3.tar.xz
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
- Support for arm64 SME 2 and 2.1. SME2 introduces a new 512-bit
architectural register (ZT0, for the look-up table feature) that
Linux needs to save/restore
- Include TPIDR2 in the signal context and add the corresponding
kselftests
- Perf updates: Arm SPEv1.2 support, HiSilicon uncore PMU updates, ACPI
  support for the Marvell DDR and TAD PMU drivers, reset DTM_PMU_CONFIG
  (ARM CMN) at probe time
- Support for DYNAMIC_FTRACE_WITH_CALL_OPS on arm64
- Permit EFI boot with MMU and caches on. Instead of cleaning the
entire loaded kernel image to the PoC and disabling the MMU and
caches before branching to the kernel bare metal entry point, leave
the MMU and caches enabled and rely on EFI's cacheable 1:1 mapping of
all of system RAM to populate the initial page tables
- Expose the AArch32 (compat) ELF_HWCAP features to userspace in an arm64
  kernel (the arm32 kernel only defines the values); see the probe sketch
  after this list
- Harden the arm64 shadow call stack pointer handling: stash the shadow
stack pointer in the task struct on interrupt, load it directly from
this structure
- Signal handling cleanups to remove redundant validation of size
information and avoid reading the same data from userspace twice
- Refactor the hwcap macros to make use of the automatically generated
  ID registers. This should make writing new hwcaps less error-prone
- Further arm64 sysreg conversion and some fixes
- arm64 kselftest fixes and improvements
- Pointer authentication cleanups: don't sign leaf functions, unify
asm-arch manipulation
- Pseudo-NMI code generation optimisations
- Minor fixes for SME and TPIDR2 handling
- Miscellaneous updates: ARCH_FORCE_MAX_ORDER is now selectable, replace
  strtobool() with kstrtobool() in the cpufeature.c code, apply the
  dynamic shadow call stack patching in two passes, catch pfn changes
  made in set_pte_at() without the required break-before-make sequence,
  attempt to dump all instructions on unhandled kernel faults
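
A quick illustration of what the hwcap exposure above means for userspace:
programs query these bits through the auxiliary vector, so probing for the
new SME2 capability needs nothing beyond getauxval(). A minimal sketch (the
HWCAP2_SME2 value is copied from the uapi header hunk in the diff below; the
printout is illustrative, not kernel output):

  #include <stdio.h>
  #include <sys/auxv.h>

  #ifndef HWCAP2_SME2
  #define HWCAP2_SME2 (1UL << 37)  /* from arch/arm64/include/uapi/asm/hwcap.h */
  #endif

  int main(void)
  {
          unsigned long hwcap2 = getauxval(AT_HWCAP2);

          /* Each HWCAP2_* bit mirrors a KERNEL_HWCAP_* entry in cpufeature.c */
          printf("SME2: %s\n", (hwcap2 & HWCAP2_SME2) ? "present" : "absent");
          return 0;
  }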
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (130 commits)
arm64: fix .idmap.text assertion for large kernels
kselftest/arm64: Don't require FA64 for streaming SVE+ZA tests
kselftest/arm64: Copy whole EXTRA context
arm64: kprobes: Drop ID map text from kprobes blacklist
perf: arm_spe: Print the version of SPE detected
perf: arm_spe: Add support for SPEv1.2 inverted event filtering
perf: Add perf_event_attr::config3
arm64/sme: Fix __finalise_el2 SMEver check
drivers/perf: fsl_imx8_ddr_perf: Remove set-but-not-used variable
arm64/signal: Only read new data when parsing the ZT context
arm64/signal: Only read new data when parsing the ZA context
arm64/signal: Only read new data when parsing the SVE context
arm64/signal: Avoid rereading context frame sizes
arm64/signal: Make interface for restore_fpsimd_context() consistent
arm64/signal: Remove redundant size validation from parse_user_sigframe()
arm64/signal: Don't redundantly verify FPSIMD magic
arm64/cpufeature: Use helper macros to specify hwcaps
arm64/cpufeature: Always use symbolic name for feature value in hwcaps
arm64/sysreg: Initial unsigned annotations for ID registers
arm64/sysreg: Initial annotation of signed ID registers
...
Diffstat (limited to 'arch/arm64')
57 files changed, 1649 insertions, 716 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c5ccca26a408..27b2592698b0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -123,6 +123,8 @@ config ARM64
 	select DMA_DIRECT_REMAP
 	select EDAC_SUPPORT
 	select FRAME_POINTER
+	select FUNCTION_ALIGNMENT_4B
+	select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS
 	select GENERIC_ALLOCATOR
 	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_CLOCKEVENTS_BROADCAST
@@ -184,6 +186,8 @@ config ARM64
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
+		if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
 		if DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -1470,10 +1474,23 @@ config XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.

+# include/linux/mmzone.h requires the following to be true:
+#
+#   MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS
+#
+# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS + 1 - PAGE_SHIFT:
+#
+#     | SECTION_SIZE_BITS | PAGE_SHIFT | max MAX_ORDER | default MAX_ORDER |
+# ----+-------------------+------------+---------------+-------------------+
+#  4K |        27         |     12     |      16       |        11         |
+# 16K |        27         |     14     |      14       |        12         |
+# 64K |        29         |     16     |      14       |        14         |
 config ARCH_FORCE_MAX_ORDER
-	int
+	int "Maximum zone order" if ARM64_4K_PAGES || ARM64_16K_PAGES
 	default "14" if ARM64_64K_PAGES
+	range 12 14 if ARM64_16K_PAGES
 	default "12" if ARM64_16K_PAGES
+	range 11 16 if ARM64_4K_PAGES
 	default "11"
 	help
 	  The kernel memory allocator divides physically contiguous memory
@@ -1486,7 +1503,7 @@ config ARCH_FORCE_MAX_ORDER
 	  This config option is actually maximum order plus one. For example,
 	  a value of 11 means that the largest free memory block is 2^10 pages.

-	  We make sure that we can allocate upto a HugePage size for each configuration.
+	  We make sure that we can allocate up to a HugePage size for each configuration.
 	  Hence we have :
 		MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2

@@ -1832,7 +1849,7 @@ config ARM64_PTR_AUTH_KERNEL
 	bool "Use pointer authentication for kernel"
 	default y
 	depends on ARM64_PTR_AUTH
-	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_ARMV8_3
 	# Modern compilers insert a .note.gnu.property section note for PAC
 	# which is only understood by binutils starting with version 2.33.1.
 	depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
@@ -1857,7 +1874,7 @@ config CC_HAS_SIGN_RETURN_ADDRESS
 	# GCC 7, 8
 	def_bool $(cc-option,-msign-return-address=all)

-config AS_HAS_PAC
+config AS_HAS_ARMV8_3
 	def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)

 config AS_HAS_CFI_NEGATE_RA_STATE
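
Sanity-checking the new Kconfig comment's arithmetic: with 4K pages,
SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12, so the maximum MAX_ORDER is
27 + 1 - 12 = 16, matching both the table and the new "range 11 16" line.
A standalone compile-time restatement (C11, not kernel code; constants
copied from the table above):

  #include <assert.h>

  /* Values from the table above, 4K page case. */
  #define SECTION_SIZE_BITS 27
  #define PAGE_SHIFT        12
  #define MAX_ORDER         16  /* claimed maximum */

  /* include/linux/mmzone.h requires: MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS */
  static_assert(MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS,
                "MAX_ORDER exceeds what one sparsemem section can hold");

  int main(void) { return 0; }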
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 165e544aa7f9..89a0b13b058d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -187,7 +187,7 @@ config ARCH_MVEBU
 	select PINCTRL_ARMADA_CP110
 	select PINCTRL_AC5
 	help
-	  This enables support for Marvell EBU familly, including:
+	  This enables support for Marvell EBU family, including:
 	   - Armada 3700 SoC Family
 	   - Armada 7K SoC Family
 	   - Armada 8K SoC Family
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d7dfe00df7d2..2d49aea0ff67 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -63,50 +63,37 @@ stack_protector_prepare: prepare0
 					include/generated/asm-offsets.h))
 endif

-ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.2-a
-endif
-
-# Ensure that if the compiler supports branch protection we default it
-# off, this will be overridden if we are using branch protection.
-branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
-
-ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
-# We enable additional protection for leaf functions as there is some
-# narrow potential for ROP protection benefits and no substantial
-# performance impact has been observed.
-PACRET-y := pac-ret+leaf
-
-# Using a shadow call stack in leaf functions is too costly, so avoid PAC there
-# as well when we may be patching PAC into SCS
-PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret
-
 ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti
+  KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti
+else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
+  ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y)
+    KBUILD_CFLAGS += -mbranch-protection=pac-ret
+  else
+    KBUILD_CFLAGS += -msign-return-address=non-leaf
+  endif
 else
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y)
-endif
-# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
-# compiler to generate them and consequently to break the single image contract
-# we pass it only to the assembler. This option is utilized only in case of non
-# integrated assemblers.
-ifeq ($(CONFIG_AS_HAS_PAC), y)
-asm-arch := armv8.3-a
-endif
-endif
-
-KBUILD_CFLAGS += $(branch-prot-flags-y)
-
-ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.4-a
+  KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none)
 endif

+# Tell the assembler to support instructions from the latest target
+# architecture.
+#
+# For non-integrated assemblers we'll pass this on the command line, and for
+# integrated assemblers we'll define ARM64_ASM_ARCH and ARM64_ASM_PREAMBLE for
+# inline usage.
+#
+# We cannot pass the same arch flag to the compiler as this would allow it to
+# freely generate instructions which are not supported by earlier architecture
+# versions, which would prevent a single kernel image from working on earlier
+# hardware.
 ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.5-a
+  asm-arch := armv8.5-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
+  asm-arch := armv8.4-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_3), y)
+  asm-arch := armv8.3-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
+  asm-arch := armv8.2-a
 endif

 ifdef asm-arch
@@ -139,7 +126,10 @@ endif

 CHECKFLAGS	+= -D__aarch64__

-ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+  CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
   KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
   CC_FLAGS_FTRACE := -fpatchable-function-entry=2
 endif
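
The new CC_FLAGS_FTRACE value is what enables DYNAMIC_FTRACE_WITH_CALL_OPS:
-fpatchable-function-entry=4,2 asks the compiler for four NOPs per function,
two of them placed before the entry point. The kernel can later rewrite those
pre-entry words (e.g. with the aarch64_insn_write_literal_u64() helper added
elsewhere in this diff) so each function carries a pointer to its ftrace_ops.
A per-function sketch of the equivalent attribute (my_traced_func is a
hypothetical example; the attribute is the GCC/Clang spelling of the flag):

  /* Equivalent of compiling with -fpatchable-function-entry=4,2:
   * 4 NOPs in total, 2 of them emitted before the function's entry point.
   */
  __attribute__((patchable_function_entry(4, 2)))
  void my_traced_func(void)
  {
          /* function body */
  }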
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 48d4473e8eee..01281a5336cf 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void)
 	asm volatile ("msr daifclr, #3" : : : "memory");
 }

+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+	return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2cfc4245d2e2..3dd8982a9ce3 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -11,6 +11,8 @@

 #include <linux/kasan-checks.h>

+#include <asm/alternative-macros.h>
+
 #define __nops(n)	".rept	" #n "\nnop\n.endr\n"
 #define nops(n)		asm volatile(__nops(n))

@@ -41,10 +43,11 @@
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #define pmr_sync()						\
 	do {							\
-		extern struct static_key_false gic_pmr_sync;	\
-								\
-		if (static_branch_unlikely(&gic_pmr_sync))	\
-			dsb(sy);				\
+		asm volatile(					\
+		ALTERNATIVE_CB("dsb sy",			\
+			       ARM64_HAS_GIC_PRIO_RELAXED_SYNC,	\
+			       alt_cb_patch_nops)		\
+		);						\
 	} while(0)
 #else
 #define pmr_sync()	do {} while (0)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 03d1c9d7af82..6bf013fb110d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void)
 		cpus_have_const_cap(ARM64_SME);
 }

+static __always_inline bool system_supports_sme2(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SME) &&
+		cpus_have_const_cap(ARM64_SME2);
+}
+
 static __always_inline bool system_supports_fa64(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SME) &&
@@ -806,7 +812,7 @@ static inline bool system_has_full_ptr_auth(void)
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+	       cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
 }

 static inline bool system_supports_mte(void)
@@ -864,7 +870,11 @@ static inline bool cpu_has_hw_af(void)
 	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
 		return false;

-	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	/*
+	 * Use cached version to avoid emulated msr operation on KVM
+	 * guests.
+	 */
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(mmfr1,
 						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index de4ff90785b2..acaa39f6381a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -114,6 +114,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
 #define EFI_ALLOC_ALIGN		SZ_64K
 #define EFI_ALLOC_LIMIT		((1UL << 48) - 1)

+extern unsigned long primary_entry_offset(void);
+
 /*
  * On ARM systems, virtually remapped UEFI runtime services are set up in two
  * distinct stages:
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 668569adf4d3..2cdd010f9524 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -53,10 +53,10 @@
 	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

 	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
-	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
 	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
-	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
-		      1 << SYS_PMSCR_EL2_PA_SHIFT)
+	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
+		      1 << PMSCR_EL2_PA_SHIFT)
 	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
 .Lskip_spe_el2_\@:
 	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
@@ -177,7 +177,7 @@
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
- * if VHE was not evailable. The kernel context will be upgraded to VHE
+ * if VHE was not available. The kernel context will be upgraded to VHE
  * if possible later on in the boot process
  *
  * Regs: x0, x1 and x2 are clobbered.
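
The cpu_has_hw_af() change above swaps a raw MRS (which KVM guests must trap
and emulate) for the cached, sanitised copy of ID_AA64MMFR1_EL1; the field
extraction itself is ordinary shift-and-mask arithmetic over 4-bit ID fields.
A standalone restatement (sketch only, not the kernel's implementation;
HAFDBS sits in the low field of ID_AA64MMFR1_EL1):

  #include <stdint.h>

  /* ID register fields are 4 bits wide (ARM64_FEATURE_FIELD_BITS). */
  static inline uint64_t extract_unsigned_field(uint64_t reg, unsigned int shift)
  {
          return (reg >> shift) & 0xf;
  }

  /* Non-zero HAFDBS means the CPU can update the Access flag (and, for
   * values >= 2, the Dirty state) in hardware. */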
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 206de10524e3..c9f15b9e3c71 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -350,6 +350,7 @@
 #define ESR_ELx_SME_ISS_ILL		1
 #define ESR_ELx_SME_ISS_SM_DISABLED	2
 #define ESR_ELx_SME_ISS_ZA_DISABLED	3
+#define ESR_ELx_SME_ISS_ZT_DISABLED	4

 #ifndef __ASSEMBLY__
 #include <asm/types.h>
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index e6fa1e2982c8..67f2fb781f59 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void);
 struct cpu_fp_state {
 	struct user_fpsimd_state *st;
 	void *sve_state;
-	void *za_state;
+	void *sme_state;
 	u64 *svcr;
 	unsigned int sve_vl;
 	unsigned int sme_vl;
@@ -105,6 +105,13 @@ static inline void *sve_pffr(struct thread_struct *thread)
 	return (char *)thread->sve_state + sve_ffr_offset(vl);
 }

+static inline void *thread_zt_state(struct thread_struct *thread)
+{
+	/* The ZT register state is stored immediately after the ZA state */
+	unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));
+	return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
+}
+
 extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
 			   int restore_ffr);
@@ -112,12 +119,13 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
 extern void sme_set_vq(unsigned long vq_minus_1);
-extern void za_save_state(void *state);
-extern void za_load_state(void const *state);
+extern void sme_save_state(void *state, int zt);
+extern void sme_load_state(void const *state, int zt);

 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);

 extern u64 read_zcr_features(void);
@@ -355,14 +363,20 @@ extern int sme_get_current_vl(void);

 /*
  * Return how many bytes of memory are required to store the full SME
- * specific state (currently just ZA) for task, given task's currently
- * configured vector length.
+ * specific state for task, given task's currently configured vector
+ * length.
  */
-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
 {
 	unsigned int vl = task_get_sme_vl(task);
+	size_t size;
+
+	size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+
+	if (system_supports_sme2())
+		size += ZT_SIG_REG_SIZE;

-	return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+	return size;
 }

 #else
@@ -382,7 +396,7 @@ static inline int sme_max_virtualisable_vl(void) { return 0; }
 static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
 static inline int sme_get_current_vl(void) { return -EINVAL; }

-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
 {
 	return 0;
 }
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 5e0910cf4832..cd03819a3b68 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -221,6 +221,28 @@
 .endm

 /*
+ * LDR (ZT0)
+ *
+ *	LDR ZT0, nx
+ */
+.macro _ldr_zt nx
+	_check_general_reg \nx
+	.inst	0xe11f8000	\
+		| (\nx << 5)
+.endm
+
+/*
+ * STR (ZT0)
+ *
+ *	STR ZT0, nx
+ */
+.macro _str_zt nx
+	_check_general_reg \nx
+	.inst	0xe13f8000	\
+		| (\nx << 5)
+.endm
+
+/*
  * Zero the entire ZA array
  *	ZERO ZA
  */
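
For scale, the new SME state layout works out as follows: ZA is a VL-by-VL
byte array, and ZT0 is a single fixed 512-bit register stored immediately
after it (see thread_zt_state() above). A worked example using the
signal-frame macros (ZA_SIG_REGS_SIZE is reconstructed here from the
documented VL x VL layout, since its definition is outside this hunk):

  #include <stdio.h>

  #define SVE_VQ_BYTES         16  /* bytes per vector quadword increment */
  #define ZA_SIG_REGS_SIZE(vq) (((vq) * SVE_VQ_BYTES) * ((vq) * SVE_VQ_BYTES))
  #define ZT_SIG_REG_SIZE      512                    /* bits, from sigcontext.h */
  #define ZT_SIG_REG_BYTES     (ZT_SIG_REG_SIZE / 8)  /* = 64 */

  int main(void)
  {
          unsigned int vq = 2;  /* streaming vector length of 32 bytes */

          /* ZA: 32 x 32 = 1024 bytes at this vector length. */
          printf("ZA payload:  %u bytes\n", (unsigned)ZA_SIG_REGS_SIZE(vq));
          /* ZT0: always 64 bytes, independent of the vector length. */
          printf("ZT0 payload: %u bytes\n", (unsigned)ZT_SIG_REG_BYTES);
          return 0;
  }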
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5664729800ae..1c2672bbbf37 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -62,20 +62,7 @@ extern unsigned long ftrace_graph_call;

 extern void return_to_handler(void);

-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
-	/*
-	 * Adjust addr to point at the BL in the callsite.
-	 * See ftrace_init_nop() for the callsite sequence.
-	 */
-	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
-		return addr + AARCH64_INSN_SIZE;
-	/*
-	 * addr is the address of the mcount call instruction.
-	 * recordmcount does the necessary offset calculation.
-	 */
-	return addr;
-}
+unsigned long ftrace_call_adjust(unsigned long addr);

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 struct dyn_ftrace;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 06dd12c514e6..5d45f19fda7f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -31,12 +31,20 @@
 #define COMPAT_HWCAP_VFPD32	(1 << 19)
 #define COMPAT_HWCAP_LPAE	(1 << 20)
 #define COMPAT_HWCAP_EVTSTRM	(1 << 21)
+#define COMPAT_HWCAP_FPHP	(1 << 22)
+#define COMPAT_HWCAP_ASIMDHP	(1 << 23)
+#define COMPAT_HWCAP_ASIMDDP	(1 << 24)
+#define COMPAT_HWCAP_ASIMDFHM	(1 << 25)
+#define COMPAT_HWCAP_ASIMDBF16	(1 << 26)
+#define COMPAT_HWCAP_I8MM	(1 << 27)

 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
 #define COMPAT_HWCAP2_SHA1	(1 << 2)
 #define COMPAT_HWCAP2_SHA2	(1 << 3)
 #define COMPAT_HWCAP2_CRC32	(1 << 4)
+#define COMPAT_HWCAP2_SB	(1 << 5)
+#define COMPAT_HWCAP2_SSBS	(1 << 6)

 #ifndef __ASSEMBLY__
 #include <linux/log2.h>
@@ -123,6 +131,12 @@
 #define KERNEL_HWCAP_CSSC		__khwcap2_feature(CSSC)
 #define KERNEL_HWCAP_RPRFM		__khwcap2_feature(RPRFM)
 #define KERNEL_HWCAP_SVE2P1		__khwcap2_feature(SVE2P1)
+#define KERNEL_HWCAP_SME2		__khwcap2_feature(SME2)
+#define KERNEL_HWCAP_SME2P1		__khwcap2_feature(SME2P1)
+#define KERNEL_HWCAP_SME_I16I32		__khwcap2_feature(SME_I16I32)
+#define KERNEL_HWCAP_SME_BI32I32	__khwcap2_feature(SME_BI32I32)
+#define KERNEL_HWCAP_SME_B16B16		__khwcap2_feature(SME_B16B16)
+#define KERNEL_HWCAP_SME_F16F16		__khwcap2_feature(SME_F16F16)

 /*
  * This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aaf1f52fbf3e..139a88e4e852 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -420,6 +420,7 @@ __AARCH64_INSN_FUNCS(sb,	0xFFFFFFFF, 0xD50330FF)
 __AARCH64_INSN_FUNCS(clrex,	0xFFFFF0FF, 0xD503305F)
 __AARCH64_INSN_FUNCS(ssbb,	0xFFFFFFFF, 0xD503309F)
 __AARCH64_INSN_FUNCS(pssbb,	0xFFFFFFFF, 0xD503349F)
+__AARCH64_INSN_FUNCS(bti,	0xFFFFFF3F, 0xD503241f)

 #undef	__AARCH64_INSN_FUNCS
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b57b9b1e4344..e0f5f6b73edd 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,43 +21,77 @@
  * exceptions should be unmasked.
  */

-/*
- * CPU interrupt mask handling.
- */
-static inline void arch_local_irq_enable(void)
+static __always_inline bool __irqflags_uses_pmr(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
+}

+static __always_inline void __daif_local_irq_enable(void)
+{
+	barrier();
+	asm volatile("msr daifclr, #3");
+	barrier();
+}

+static __always_inline void __pmr_local_irq_enable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}

-	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #3		// arch_local_irq_enable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQON)
-		: "memory");
-
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
 	pmr_sync();
+	barrier();
 }

-static inline void arch_local_irq_disable(void)
+static inline void arch_local_irq_enable(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_enable();
+	} else {
+		__daif_local_irq_enable();
+	}
+}

+static __always_inline void __daif_local_irq_disable(void)
+{
+	barrier();
+	asm volatile("msr daifset, #3");
+	barrier();
+}

+static __always_inline void __pmr_local_irq_disable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}

-	asm volatile(ALTERNATIVE(
-		"msr	daifset, #3		// arch_local_irq_disable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
-		: "memory");
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
+	barrier();
+}

+static inline void arch_local_irq_disable(void)
+{
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_disable();
+	} else {
+		__daif_local_irq_disable();
+	}
+}

+static __always_inline unsigned long __daif_local_save_flags(void)
+{
+	return read_sysreg(daif);
+}

+static __always_inline unsigned long __pmr_local_save_flags(void)
+{
+	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }

 /*
@@ -65,69 +99,108 @@ static inline void arch_local_irq_disable(void)
  */
 static inline unsigned long arch_local_save_flags(void)
 {
-	unsigned long flags;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_save_flags();
+	} else {
+		return __daif_local_save_flags();
+	}
+}

-	asm volatile(ALTERNATIVE(
-		"mrs	%0, daif",
-		__mrs_s("%0", SYS_ICC_PMR_EL1),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		: "=&r" (flags)
-		:
-		: "memory");
+static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & PSR_I_BIT;
+}

-	return flags;
+static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
+{
+	return flags != GIC_PRIO_IRQON;
 }

-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	int res;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled_flags(flags);
+	} else {
+		return __daif_irqs_disabled_flags(flags);
+	}
+}

-	asm volatile(ALTERNATIVE(
-		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
-		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		: "=&r" (res)
-		: "r" ((int) flags)
-		: "memory");
+static __always_inline bool __daif_irqs_disabled(void)
+{
+	return __daif_irqs_disabled_flags(__daif_local_save_flags());
 }

-	return res;
+static __always_inline bool __pmr_irqs_disabled(void)
+{
+	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
 }

-static inline int arch_irqs_disabled(void)
+static inline bool arch_irqs_disabled(void)
 {
-	return arch_irqs_disabled_flags(arch_local_save_flags());
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled();
+	} else {
+		return __daif_irqs_disabled();
+	}
 }

-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long __daif_local_irq_save(void)
 {
-	unsigned long flags;
+	unsigned long flags = __daif_local_save_flags();
+
+	__daif_local_irq_disable();
+
+	return flags;
+}

-	flags = arch_local_save_flags();
+static __always_inline unsigned long __pmr_local_irq_save(void)
+{
+	unsigned long flags = __pmr_local_save_flags();

 	/*
 	 * There are too many states with IRQs disabled, just keep the current
 	 * state if interrupts are already disabled/masked.
 	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!__pmr_irqs_disabled_flags(flags))
+		__pmr_local_irq_disable();

 	return flags;
 }

+static inline unsigned long arch_local_irq_save(void)
+{
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_irq_save();
+	} else {
+		return __daif_local_irq_save();
+	}
+}
+
+static __always_inline void __daif_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg(flags, daif);
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
+	pmr_sync();
+	barrier();
+}
+
 /*
  * restore saved IRQ state
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(ALTERNATIVE(
-		"msr	daif, %0",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" (flags)
-		: "memory");
-
-	pmr_sync();
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_restore(flags);
+	} else {
+		__daif_local_irq_restore(flags);
+	}
 }

 #endif /* __ASM_IRQFLAGS_H */
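
Note the irqflags rework keeps the arch API and its calling convention
intact; only the implementation strategy changed, from runtime-patched asm
alternatives to a C-level choice between the PMR and DAIF helpers.
Caller-side pattern (standard kernel idiom, shown for orientation only):

  static void example_critical_section(void)
  {
          unsigned long flags;

          flags = arch_local_irq_save();   /* dispatches to __pmr_* or __daif_* */
          /* ... critical section runs with interrupts masked ... */
          arch_local_irq_restore(flags);
  }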
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 1436fa1cde24..d3acd9c87509 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -5,8 +5,8 @@
 #include <asm/assembler.h>
 #endif

-#define __ALIGN		.align 2
-#define __ALIGN_STR	".align 2"
+#define __ALIGN		.balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR	".balign " #CONFIG_FUNCTION_ALIGNMENT

 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 6bf5adc56295..68908b82b168 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -7,6 +7,8 @@
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);

+int aarch64_insn_write_literal_u64(void *addr, u64 val);
+
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 65e78999c75d..27455bfd64bc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }

 extern void __sync_icache_dcache(pte_t pteval);
+bool pgattr_change_is_safe(u64 old, u64 new);

 /*
  * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval);
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */

-static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
 					   pte_t pte)
 {
 	pte_t old_pte;
@@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
 		     __func__, pte_val(old_pte), pte_val(pte));
+	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
+		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
 }

 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 		mte_sync_tags(old_pte, pte);
 	}

-	__check_racy_pte_update(mm, ptep, pte);
+	__check_safe_pte_update(mm, ptep, pte);

 	set_pte(ptep, pte);
 }
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d51b32a69309..3918f2a67970 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -161,7 +161,7 @@ struct thread_struct {
 	enum fp_type		fp_type;	/* registers FPSIMD or SVE? */
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
-	void			*za_state;	/* ZA register, if any */
+	void			*sme_state;	/* ZA and ZT state, if any */
 	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
 	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
 	unsigned long		fault_address;	/* fault info */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41b332c054ab..47ec58031f11 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -194,7 +194,7 @@ struct pt_regs {
 	u32 unused2;
 #endif
 	u64 sdei_ttbr1;
-	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+	/* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. */
 	u64 pmr_save;
 	u64 stackframe[2];
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index ff7da1268a52..13df982a0808 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -10,15 +10,16 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18

-	.macro scs_load tsk
-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+	.macro scs_load_current
+	get_current_task scs_sp
+	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
 	.endm

 	.macro scs_save tsk
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk
+	.macro scs_load_current
 	.endm

 	.macro scs_save tsk
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1312fb48f18b..043ecc3405e7 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -216,101 +216,22 @@
 #define SYS_PAR_EL1_FST		GENMASK(6, 1)

 /*** Statistical Profiling Extension ***/
-/* ID registers */
-#define SYS_PMSIDR_EL1			sys_reg(3, 0, 9, 9, 7)
-#define SYS_PMSIDR_EL1_FE_SHIFT		0
-#define SYS_PMSIDR_EL1_FT_SHIFT		1
-#define SYS_PMSIDR_EL1_FL_SHIFT		2
-#define SYS_PMSIDR_EL1_ARCHINST_SHIFT	3
-#define SYS_PMSIDR_EL1_LDS_SHIFT	4
-#define SYS_PMSIDR_EL1_ERND_SHIFT	5
-#define SYS_PMSIDR_EL1_INTERVAL_SHIFT	8
-#define SYS_PMSIDR_EL1_INTERVAL_MASK	0xfUL
-#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT	12
-#define SYS_PMSIDR_EL1_MAXSIZE_MASK	0xfUL
-#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT	16
-#define SYS_PMSIDR_EL1_COUNTSIZE_MASK	0xfUL
-
-#define SYS_PMBIDR_EL1			sys_reg(3, 0, 9, 10, 7)
-#define SYS_PMBIDR_EL1_ALIGN_SHIFT	0
-#define SYS_PMBIDR_EL1_ALIGN_MASK	0xfU
-#define SYS_PMBIDR_EL1_P_SHIFT		4
-#define SYS_PMBIDR_EL1_F_SHIFT		5
-
-/* Sampling controls */
-#define SYS_PMSCR_EL1			sys_reg(3, 0, 9, 9, 0)
-#define SYS_PMSCR_EL1_E0SPE_SHIFT	0
-#define SYS_PMSCR_EL1_E1SPE_SHIFT	1
-#define SYS_PMSCR_EL1_CX_SHIFT		3
-#define SYS_PMSCR_EL1_PA_SHIFT		4
-#define SYS_PMSCR_EL1_TS_SHIFT		5
-#define SYS_PMSCR_EL1_PCT_SHIFT		6
-
-#define SYS_PMSCR_EL2			sys_reg(3, 4, 9, 9, 0)
-#define SYS_PMSCR_EL2_E0HSPE_SHIFT	0
-#define SYS_PMSCR_EL2_E2SPE_SHIFT	1
-#define SYS_PMSCR_EL2_CX_SHIFT		3
-#define SYS_PMSCR_EL2_PA_SHIFT		4
-#define SYS_PMSCR_EL2_TS_SHIFT		5
-#define SYS_PMSCR_EL2_PCT_SHIFT		6
-
-#define SYS_PMSICR_EL1			sys_reg(3, 0, 9, 9, 2)
-
-#define SYS_PMSIRR_EL1			sys_reg(3, 0, 9, 9, 3)
-#define SYS_PMSIRR_EL1_RND_SHIFT	0
-#define SYS_PMSIRR_EL1_INTERVAL_SHIFT	8
-#define SYS_PMSIRR_EL1_INTERVAL_MASK	0xffffffUL
-
-/* Filtering controls */
-#define SYS_PMSNEVFR_EL1		sys_reg(3, 0, 9, 9, 1)
-
-#define SYS_PMSFCR_EL1			sys_reg(3, 0, 9, 9, 4)
-#define SYS_PMSFCR_EL1_FE_SHIFT		0
-#define SYS_PMSFCR_EL1_FT_SHIFT		1
-#define SYS_PMSFCR_EL1_FL_SHIFT		2
-#define SYS_PMSFCR_EL1_B_SHIFT		16
-#define SYS_PMSFCR_EL1_LD_SHIFT		17
-#define SYS_PMSFCR_EL1_ST_SHIFT		18
-
-#define SYS_PMSEVFR_EL1			sys_reg(3, 0, 9, 9, 5)
-#define SYS_PMSEVFR_EL1_RES0_8_2	\
+#define PMSEVFR_EL1_RES0_IMP	\
 	(GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
 	 BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
-#define SYS_PMSEVFR_EL1_RES0_8_3	\
-	(SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
-
-#define SYS_PMSLATFR_EL1		sys_reg(3, 0, 9, 9, 6)
-#define SYS_PMSLATFR_EL1_MINLAT_SHIFT	0
-
-/* Buffer controls */
-#define SYS_PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
-#define SYS_PMBLIMITR_EL1_E_SHIFT	0
-#define SYS_PMBLIMITR_EL1_FM_SHIFT	1
-#define SYS_PMBLIMITR_EL1_FM_MASK	0x3UL
-#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ	(0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
-
-#define SYS_PMBPTR_EL1			sys_reg(3, 0, 9, 10, 1)
+#define PMSEVFR_EL1_RES0_V1P1	\
+	(PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
+#define PMSEVFR_EL1_RES0_V1P2	\
+	(PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6))

 /* Buffer error reporting */
-#define SYS_PMBSR_EL1			sys_reg(3, 0, 9, 10, 3)
-#define SYS_PMBSR_EL1_COLL_SHIFT	16
-#define SYS_PMBSR_EL1_S_SHIFT		17
-#define SYS_PMBSR_EL1_EA_SHIFT		18
-#define SYS_PMBSR_EL1_DL_SHIFT		19
-#define SYS_PMBSR_EL1_EC_SHIFT		26
-#define SYS_PMBSR_EL1_EC_MASK		0x3fUL
-
-#define SYS_PMBSR_EL1_EC_BUF		(0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S1	(0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S2	(0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
-
-#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT	0
-#define SYS_PMBSR_EL1_FAULT_FSC_MASK	0x3fUL
+#define PMBSR_EL1_FAULT_FSC_SHIFT	PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_FAULT_FSC_MASK	PMBSR_EL1_MSS_MASK

-#define SYS_PMBSR_EL1_BUF_BSC_SHIFT	0
-#define SYS_PMBSR_EL1_BUF_BSC_MASK	0x3fUL
+#define PMBSR_EL1_BUF_BSC_SHIFT		PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_BUF_BSC_MASK		PMBSR_EL1_MSS_MASK

-#define SYS_PMBSR_EL1_BUF_BSC_FULL	(0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+#define PMBSR_EL1_BUF_BSC_FULL		0x1UL

 /*** End of Statistical Profiling Extension ***/

@@ -575,6 +496,7 @@
 #define SCTLR_ELx_DSSBS	(BIT(44))
 #define SCTLR_ELx_ATA	(BIT(43))

+#define SCTLR_ELx_EE_SHIFT	25
 #define SCTLR_ELx_ENIA_SHIFT	31

 #define SCTLR_ELx_ITFSB	 (BIT(37))
@@ -583,7 +505,7 @@
 #define SCTLR_ELx_LSMAOE (BIT(29))
 #define SCTLR_ELx_nTLSMD (BIT(28))
 #define SCTLR_ELx_ENDA	 (BIT(27))
-#define SCTLR_ELx_EE     (BIT(25))
+#define SCTLR_ELx_EE     (BIT(SCTLR_ELx_EE_SHIFT))
 #define SCTLR_ELx_EIS	 (BIT(22))
 #define SCTLR_ELx_IESB	 (BIT(21))
 #define SCTLR_ELx_TSCXT	 (BIT(20))
@@ -809,8 +731,8 @@

 #define ARM64_FEATURE_FIELD_BITS	4

-/* Create a mask for the feature bits of the specified feature. */
-#define ARM64_FEATURE_MASK(x)	(GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
+/* Defined for compatibility only, do not add new users. */
+#define ARM64_FEATURE_MASK(x)	(x##_MASK)

 #ifdef __ASSEMBLY__
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index b713d30544f1..69a4fb749c65 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -96,5 +96,11 @@
 #define HWCAP2_CSSC		(1UL << 34)
 #define HWCAP2_RPRFM		(1UL << 35)
 #define HWCAP2_SVE2P1		(1UL << 36)
+#define HWCAP2_SME2		(1UL << 37)
+#define HWCAP2_SME2P1		(1UL << 38)
+#define HWCAP2_SME_I16I32	(1UL << 39)
+#define HWCAP2_SME_BI32I32	(1UL << 40)
+#define HWCAP2_SME_B16B16	(1UL << 41)
+#define HWCAP2_SME_F16F16	(1UL << 42)

 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 9525041e4a14..656a10ea6c67 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -144,6 +144,14 @@ struct sve_context {

 #define SVE_SIG_FLAG_SM	0x1	/* Context describes streaming mode */

+/* TPIDR2_EL0 context */
+#define TPIDR2_MAGIC	0x54504902
+
+struct tpidr2_context {
+	struct _aarch64_ctx head;
+	__u64 tpidr2;
+};
+
 #define ZA_MAGIC	0x54366345

 struct za_context {
@@ -152,6 +160,14 @@ struct za_context {
 	__u16 __reserved[3];
 };

+#define ZT_MAGIC	0x5a544e01
+
+struct zt_context {
+	struct _aarch64_ctx head;
+	__u16 nregs;
+	__u16 __reserved[3];
+};
+
 #endif /* !__ASSEMBLY__ */

 #include <asm/sve_context.h>
@@ -304,4 +320,15 @@ struct za_context {
 #define ZA_SIG_CONTEXT_SIZE(vq) \
 	(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))

+#define ZT_SIG_REG_SIZE 512
+
+#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)
+
+#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
+
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+
+#define ZT_SIG_CONTEXT_SIZE(n) \
+	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
+
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
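
The new TPIDR2_MAGIC and ZT_MAGIC records plug into the existing signal-frame
discovery scheme: records in the sigcontext reserved area form a chain of
_aarch64_ctx headers terminated by a zero magic. A minimal userspace walker
sketch (the struct is redeclared locally for illustration; error and bounds
checking elided):

  #include <stdint.h>
  #include <stddef.h>

  struct _aarch64_ctx {          /* local redeclaration of the uapi header */
          uint32_t magic;
          uint32_t size;
  };

  #define TPIDR2_MAGIC 0x54504902
  #define ZT_MAGIC     0x5a544e01

  /* Scan a sigcontext __reserved area for a record with the given magic. */
  static struct _aarch64_ctx *find_ctx(void *reserved, uint32_t magic)
  {
          struct _aarch64_ctx *head = reserved;

          while (head->magic != 0) {          /* zero magic ends the chain */
                  if (head->magic == magic)
                          return head;
                  head = (struct _aarch64_ctx *)((char *)head + head->size);
          }
          return NULL;
  }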
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), ARM64_FTR_END, }; @@ -444,8 +455,8 @@ static const struct arm64_ftr_bits ftr_mvfr0[] = { static const struct arm64_ftr_bits ftr_mvfr1[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0), @@ -529,12 +540,12 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = { }; static const struct arm64_ftr_bits ftr_id_isar6[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -562,7 +573,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_pfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1795,7 +1806,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) static int __init parse_kpti(char *str) { bool enabled; - int ret = strtobool(str, &enabled); + int ret = kstrtobool(str, 
&enabled); if (ret) return ret; @@ -2039,14 +2050,50 @@ static bool enable_pseudo_nmi; static int __init early_enable_pseudo_nmi(char *p) { - return strtobool(p, &enable_pseudo_nmi); + return kstrtobool(p, &enable_pseudo_nmi); } early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, int scope) { - return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); + /* + * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU + * feature, so will be detected earlier. + */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS); + if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) + return false; + + return enable_pseudo_nmi; +} + +static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* + * If we're not using priority masking then we won't be poking PMR_EL1, + * and there's no need to relax synchronization of writes to it, and + * ICC_CTLR_EL1 might not be accessible and we must avoid reads from + * that. + * + * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU + * feature, so will be detected earlier. + */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING); + if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING)) + return false; + + /* + * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a + * hint for interrupt distribution, a DSB is not necessary when + * unmasking IRQs via PMR, and we can relax the barrier to a NOP. + * + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0; } #endif @@ -2142,7 +2189,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }, { .desc = "GIC system register CPU interface", - .capability = ARM64_HAS_SYSREG_GIC_CPUIF, + .capability = ARM64_HAS_GIC_CPUIF_SYSREGS, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, @@ -2534,14 +2581,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = { * Depends on having GICv3 */ .desc = "IRQ priority masking", - .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .capability = ARM64_HAS_GIC_PRIO_MASKING, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, - .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, - .field_width = 4, - .sign = FTR_UNSIGNED, - .min_field_value = 1, + }, + { + /* + * Depends on ARM64_HAS_GIC_PRIO_MASKING + */ + .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_gic_prio_relaxed_sync, }, #endif #ifdef CONFIG_ARM64_E0PD @@ -2649,6 +2699,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .cpu_enable = fa64_kernel_enable, }, + { + .desc = "SME2", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME2, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, + .field_width = ID_AA64PFR1_EL1_SME_WIDTH, + .min_field_value = ID_AA64PFR1_EL1_SME_SME2, + .matches = has_cpuid_feature, + .cpu_enable = sme2_kernel_enable, + }, #endif /* CONFIG_ARM64_SME */ { .desc = "WFx with timeout", @@ -2688,13 +2750,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { {}, }; -#define HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \ 
+#define HWCAP_CPUID_MATCH(reg, field, min_value) \ .matches = has_user_cpuid_feature, \ - .sys_reg = reg, \ - .field_pos = field, \ - .field_width = width, \ - .sign = s, \ - .min_field_value = min_value, + .sys_reg = SYS_##reg, \ + .field_pos = reg##_##field##_SHIFT, \ + .field_width = reg##_##field##_WIDTH, \ + .sign = reg##_##field##_SIGNED, \ + .min_field_value = reg##_##field##_##min_value, #define __HWCAP_CAP(name, cap_type, cap) \ .desc = name, \ @@ -2702,10 +2764,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .hwcap_type = cap_type, \ .hwcap = cap, \ -#define HWCAP_CAP(reg, field, width, s, min_value, cap_type, cap) \ +#define HWCAP_CAP(reg, field, min_value, cap_type, cap) \ { \ __HWCAP_CAP(#cap, cap_type, cap) \ - HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \ + HWCAP_CPUID_MATCH(reg, field, min_value) \ } #define HWCAP_MULTI_CAP(list, cap_type, cap) \ @@ -2724,115 +2786,114 @@ static const struct arm64_cpu_capabilities arm64_features[] = { #ifdef CONFIG_ARM64_PTR_AUTH static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_APA_SHIFT, - 4, FTR_UNSIGNED, - ID_AA64ISAR1_EL1_APA_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_APA3_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_API_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth) }, {}, }; static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPA_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPI_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP) }, {}, }; #endif static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), - 
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, DP, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, FHM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG),
+ HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+ HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
- HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
+ HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+ HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
+ HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
#ifdef CONFIG_ARM64_MTE
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
+ HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
+ HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */
- HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
- HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_CSSC_IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRFM_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
+ HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
- HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+ HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+ HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
#endif /* CONFIG_ARM64_SME */
{},
};

@@ -2862,15 +2923,23 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
- HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ HWCAP_CAP(MVFR1_EL1, SIMDFMAC, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
- HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
- HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+ HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(MVFR1_EL1, FPHP, FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP),
+ HWCAP_CAP(MVFR1_EL1, SIMDHP, SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP),
+ HWCAP_CAP(ID_ISAR5_EL1, AES, VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(ID_ISAR5_EL1, AES, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(ID_ISAR5_EL1, SHA1, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+ HWCAP_CAP(ID_ISAR5_EL1, SHA2, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+ HWCAP_CAP(ID_ISAR5_EL1, CRC32, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+ HWCAP_CAP(ID_ISAR6_EL1, DP, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
+ HWCAP_CAP(ID_ISAR6_EL1, FHM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
+ HWCAP_CAP(ID_ISAR6_EL1, SB, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB),
+ HWCAP_CAP(ID_ISAR6_EL1, BF16, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
+ HWCAP_CAP(ID_ISAR6_EL1, I8MM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
+ HWCAP_CAP(ID_PFR2_EL1, SSBS, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS),
#endif
{},
};
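The refactored entries above no longer spell out the (register, field shift, width, signedness, minimum value) tuple by hand; HWCAP_CAP() now derives all of that from the generated ID register definitions, which is what makes new hwcaps less error prone to add. The underlying match is still a simple field compare. A minimal stand-alone C sketch of that check (illustrative only, not the kernel's implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Extract one ID register field, sign-extending when the field is
     * signed (e.g. FTR_SIGNED fields such as ID_AA64PFR0_EL1.FP, where
     * 0xf means "not implemented"). */
    static int64_t id_field(uint64_t reg, unsigned int shift,
                            unsigned int width, bool sign)
    {
        uint64_t val = (reg >> shift) & (((uint64_t)1 << width) - 1);

        if (sign && (val & ((uint64_t)1 << (width - 1))))
            val |= ~(((uint64_t)1 << width) - 1);   /* sign-extend */
        return (int64_t)val;
    }

    /* An entry matches when the field is at least the required minimum. */
    static bool hwcap_matches(uint64_t reg, unsigned int shift,
                              unsigned int width, bool sign, int64_t min)
    {
        return id_field(reg, shift, width, sign) >= min;
    }

For example, the old KERNEL_HWCAP_FLAGM2 entry encoded (TS shift, width 4, unsigned, minimum 2) by hand; the new form names the same threshold via the generated ID_AA64ISAR0_EL1 TS field value FLAGM2.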
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 379695262b77..eb4378c23b3c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -119,6 +119,12 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_CSSC] = "cssc",
[KERNEL_HWCAP_RPRFM] = "rprfm",
[KERNEL_HWCAP_SVE2P1] = "sve2p1",
+ [KERNEL_HWCAP_SME2] = "sme2",
+ [KERNEL_HWCAP_SME2P1] = "sme2p1",
+ [KERNEL_HWCAP_SME_I16I32] = "smei16i32",
+ [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32",
+ [KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
+ [KERNEL_HWCAP_SME_F16F16] = "smef16f16",
};

#ifdef CONFIG_COMPAT
@@ -146,6 +152,12 @@ static const char *const compat_hwcap_str[] = {
[COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */
[COMPAT_KERNEL_HWCAP(LPAE)] = "lpae",
[COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm",
+ [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp",
+ [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp",
+ [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp",
+ [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm",
+ [COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16",
+ [COMPAT_KERNEL_HWCAP(I8MM)] = "i8mm",
};

#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x)
@@ -155,6 +167,8 @@ static const char *const compat_hwcap2_str[] = {
[COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1",
[COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2",
[COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32",
+ [COMPAT_KERNEL_HWCAP2(SB)] = "sb",
+ [COMPAT_KERNEL_HWCAP2(SSBS)] = "ssbs",
};
#endif /* CONFIG_COMPAT */

diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 229436f33df5..6325db1a2179 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -100,25 +100,35 @@ SYM_FUNC_START(sme_set_vq)
SYM_FUNC_END(sme_set_vq)

/*
- * Save the SME state
+ * Save the ZA and ZT state
 *
 * x0 - pointer to buffer for state
+ * x1 - number of ZT registers to save
 */
-SYM_FUNC_START(za_save_state)
- _sme_rdsvl 1, 1 // x1 = VL/8
- sme_save_za 0, x1, 12
+SYM_FUNC_START(sme_save_state)
+ _sme_rdsvl 2, 1 // x2 = VL/8
+ sme_save_za 0, x2, 12 // Leaves x0 pointing to the end of ZA
+
+ cbz x1, 1f
+ _str_zt 0
+1:
 ret
-SYM_FUNC_END(za_save_state)
+SYM_FUNC_END(sme_save_state)

/*
- * Load the SME state
+ * Load the ZA and ZT state
 *
 * x0 - pointer to buffer for state
+ * x1 - number of ZT registers to save
 */
-SYM_FUNC_START(za_load_state)
- _sme_rdsvl 1, 1 // x1 = VL/8
- sme_load_za 0, x1, 12
+SYM_FUNC_START(sme_load_state)
+ _sme_rdsvl 2, 1 // x2 = VL/8
+ sme_load_za 0, x2, 12 // Leaves x0 pointing to the end of ZA
+
+ cbz x1, 1f
+ _ldr_zt 0
+1:
 ret
-SYM_FUNC_END(za_load_state)
+SYM_FUNC_END(sme_load_state)

#endif /* CONFIG_ARM64_SME */

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 3b625f76ffba..350ed81324ac 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -65,13 +65,35 @@ SYM_CODE_START(ftrace_caller)
 stp x29, x30, [sp, #FREGS_SIZE]
 add x29, sp, #FREGS_SIZE

- sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- mov x1, x9 // parent_ip (callsite's LR)
- ldr_l x2, function_trace_op // op
- mov x3, sp // regs
+ /* Prepare arguments for the tracer func */
+ sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ mov x1, x9 // parent_ip (callsite's LR)
+ mov x3, sp // regs
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /*
+ * The literal pointer to the ops is at an 8-byte aligned boundary
+ * which is either 12 or 16 bytes before the BL instruction in the call
+ * site. See ftrace_call_adjust() for details.
+ *
+ * Therefore here the LR points at `literal + 16` or `literal + 20`,
+ * and we can find the address of the literal in either case by
+ * aligning to an 8-byte boundary and subtracting 16. We do the
+ * alignment first as this allows us to fold the subtraction into the
+ * LDR.
+ */
+ bic x2, x30, 0x7
+ ldr x2, [x2, #-16] // op
+
+ ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func
+ blr x4 // op->func(ip, parent_ip, op, regs)
+
+#else
+ ldr_l x2, function_trace_op // op

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
- bl ftrace_stub
+ bl ftrace_stub // func(ip, parent_ip, op, regs)
+#endif

/*
 * At the callsite x0-x8 and x19-x30 were live.
Any C code will have preserved diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 11cb99c4d298..ab2a6e33c052 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH alternative_else_nop_endif 1: - scs_load tsk + scs_load_current .else add x21, sp, #PT_REGS_SIZE get_current_task tsk @@ -311,13 +311,16 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Save pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_save\@ +alternative_else_nop_endif + mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET msr_s SYS_ICC_PMR_EL1, x20 -alternative_else_nop_endif + +.Lskip_pmr_save\@: #endif /* @@ -336,15 +339,19 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Restore pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_restore\@ +alternative_else_nop_endif + ldr x20, [sp, #S_PMR_SAVE] msr_s SYS_ICC_PMR_EL1, x20 - mrs_s x21, SYS_ICC_CTLR_EL1 - tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE - dsb sy // Ensure priority change is seen by redistributor -.L__skip_pmr_sync\@: + + /* Ensure priority change is seen by redistributor */ +alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC + dsb sy alternative_else_nop_endif + +.Lskip_pmr_restore\@: #endif ldp x21, x22, [sp, #S_PC] // load ELR, SPSR @@ -848,7 +855,7 @@ SYM_FUNC_START(cpu_switch_to) msr sp_el0, x1 ptrauth_keys_install_kernel x1, x8, x9, x10 scs_save x0 - scs_load x1 + scs_load_current ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) @@ -876,19 +883,19 @@ NOKPROBE(ret_from_fork) */ SYM_FUNC_START(call_on_irq_stack) #ifdef CONFIG_SHADOW_CALL_STACK - stp scs_sp, xzr, [sp, #-16]! + get_current_task x16 + scs_save x16 ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 #endif + /* Create a frame record to save our LR and SP (implicit in FP) */ stp x29, x30, [sp, #-16]! mov x29, sp ldr_this_cpu x16, irq_stack_ptr, x17 - mov x15, #IRQ_STACK_SIZE - add x16, x16, x15 /* Move to the new stack and call the function there */ - mov sp, x16 + add sp, x16, #IRQ_STACK_SIZE blr x1 /* @@ -897,9 +904,7 @@ SYM_FUNC_START(call_on_irq_stack) */ mov sp, x29 ldp x29, x30, [sp], #16 -#ifdef CONFIG_SHADOW_CALL_STACK - ldp scs_sp, xzr, [sp], #16 -#endif + scs_load_current ret SYM_FUNC_END(call_on_irq_stack) NOKPROBE(call_on_irq_stack) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b6ef1af0122e..692dfefbe0ed 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, /* * TIF_SME controls whether a task can use SME without trapping while * in userspace, when TIF_SME is set then we must have storage - * alocated in sve_state and za_state to store the contents of both ZA + * alocated in sve_state and sme_state to store the contents of both ZA * and the SVE registers for both streaming and non-streaming modes. 
* * If both SVCR.ZA and SVCR.SM are disabled then at any point we @@ -429,7 +429,8 @@ static void task_fpsimd_load(void) write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(¤t->thread)) - za_load_state(current->thread.za_state); + sme_load_state(current->thread.sme_state, + system_supports_sme2()); if (thread_sm_enabled(¤t->thread)) restore_ffr = system_supports_fa64(); @@ -490,7 +491,8 @@ static void fpsimd_save(void) *svcr = read_sysreg_s(SYS_SVCR); if (*svcr & SVCR_ZA_MASK) - za_save_state(last->za_state); + sme_save_state(last->sme_state, + system_supports_sme2()); /* If we are in streaming mode override regular SVE. */ if (*svcr & SVCR_SM_MASK) { @@ -1257,30 +1259,30 @@ void fpsimd_release_task(struct task_struct *dead_task) #ifdef CONFIG_ARM64_SME /* - * Ensure that task->thread.za_state is allocated and sufficiently large. + * Ensure that task->thread.sme_state is allocated and sufficiently large. * * This function should be used only in preparation for replacing - * task->thread.za_state with new data. The memory is always zeroed + * task->thread.sme_state with new data. The memory is always zeroed * here to prevent stale data from showing through: this is done in * the interest of testability and predictability, the architecture * guarantees that when ZA is enabled it will be zeroed. */ void sme_alloc(struct task_struct *task) { - if (task->thread.za_state) { - memset(task->thread.za_state, 0, za_state_size(task)); + if (task->thread.sme_state) { + memset(task->thread.sme_state, 0, sme_state_size(task)); return; } /* This could potentially be up to 64K. */ - task->thread.za_state = - kzalloc(za_state_size(task), GFP_KERNEL); + task->thread.sme_state = + kzalloc(sme_state_size(task), GFP_KERNEL); } static void sme_free(struct task_struct *task) { - kfree(task->thread.za_state); - task->thread.za_state = NULL; + kfree(task->thread.sme_state); + task->thread.sme_state = NULL; } void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) @@ -1302,6 +1304,17 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) * This must be called after sme_kernel_enable(), we rely on the * feature table being sorted to ensure this. */ +void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of ZT0 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, + SYS_SMCR_EL1); +} + +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. + */ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) { /* Allow use of FA64 */ @@ -1322,7 +1335,6 @@ u64 read_smcr_features(void) unsigned int vq_max; sme_kernel_enable(NULL); - sme_smstart_sm(); /* * Set the maximum possible VL. 
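In the fpsimd.c hunks above, za_save_state()/za_load_state() become sme_save_state()/sme_load_state() with a second argument, gated on system_supports_sme2(), saying whether ZT0 must be handled as well; per the entry-fpsimd.S assembly earlier in this diff, ZT0 is simply stored immediately after the ZA tile in the same buffer. A rough C model of the resulting sme_state buffer size (a sketch under the assumption that the streaming vector length is passed in bytes, mirroring the "x2 = VL/8" comment; this is not the kernel's sme_state_size()):

    #include <stddef.h>

    #define ZT0_BYTES 64    /* ZT0 is a single 512-bit register */

    static size_t sme_state_bytes(size_t svl_bytes, int have_sme2)
    {
        /* ZA is a square tile: SVL/8 rows of SVL/8 bytes each. */
        size_t size = svl_bytes * svl_bytes;

        if (have_sme2)
            size += ZT0_BYTES;  /* ZT0 is saved right after ZA */
        return size;
    }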
@@ -1332,11 +1344,9 @@ u64 read_smcr_features(void) smcr = read_sysreg_s(SYS_SMCR_EL1); smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */ - vq_max = sve_vq_from_vl(sve_get_vl()); + vq_max = sve_vq_from_vl(sme_get_vl()); smcr |= vq_max - 1; /* set LEN field to maximum effective value */ - sme_smstop_sm(); - return smcr; } @@ -1488,7 +1498,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) sve_alloc(current, false); sme_alloc(current); - if (!current->thread.sve_state || !current->thread.za_state) { + if (!current->thread.sve_state || !current->thread.sme_state) { force_sig(SIGKILL); return; } @@ -1609,7 +1619,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) void fpsimd_flush_thread(void) { void *sve_state = NULL; - void *za_state = NULL; + void *sme_state = NULL; if (!system_supports_fpsimd()) return; @@ -1634,8 +1644,8 @@ void fpsimd_flush_thread(void) clear_thread_flag(TIF_SME); /* Defer kfree() while in atomic context */ - za_state = current->thread.za_state; - current->thread.za_state = NULL; + sme_state = current->thread.sme_state; + current->thread.sme_state = NULL; fpsimd_flush_thread_vl(ARM64_VEC_SME); current->thread.svcr = 0; @@ -1645,7 +1655,7 @@ void fpsimd_flush_thread(void) put_cpu_fpsimd_context(); kfree(sve_state); - kfree(za_state); + kfree(sme_state); } /* @@ -1711,7 +1721,7 @@ static void fpsimd_bind_task_to_cpu(void) WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; - last->za_state = current->thread.za_state; + last->sme_state = current->thread.sme_state; last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index b30b955a8921..5545fe1a9012 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -60,6 +60,89 @@ int ftrace_regs_query_register_offset(const char *name) } #endif +unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * When using mcount, addr is the address of the mcount call + * instruction, and no adjustment is necessary. + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) + return addr; + + /* + * When using patchable-function-entry without pre-function NOPS, addr + * is the address of the first NOP after the function entry point. + * + * The compiler has either generated: + * + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL <caller> + * + * Or: + * + * addr-04: BTI C + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL <caller> + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL <caller>`, which is at `addr + 4` bytes in either case. + * + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return addr + AARCH64_INSN_SIZE; + + /* + * When using patchable-function-entry with pre-function NOPs, addr is + * the address of the first pre-function NOP. 
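The comment blocks around ftrace_call_adjust() above and below pin down the CALL_OPS layout: an 8-byte ops literal occupies the two NOPs before the function entry, so the to-be-patched BL lands 12 bytes after the literal, or 16 with a BTI, and ftrace_caller can always recover the literal by rounding the return address down to 8 bytes and subtracting 16. A hedged C restatement of that arithmetic (illustrative only):

    #include <stdint.h>

    /* Patch time: address of the to-be-patched BL, given the first
     * pre-function NOP (the start of the 8-byte aligned literal). */
    static uintptr_t bl_patch_site(uintptr_t addr, int has_bti)
    {
        return addr + (has_bti ? 16 : 12);
    }

    /* Run time: recover the ops literal from LR inside ftrace_caller.
     * LR points just after the BL: literal + 16 (no BTI) or literal + 20
     * (BTI); both round down to literal + 16. */
    static uintptr_t ops_literal(uintptr_t lr)
    {
        return (lr & ~(uintptr_t)7) - 16;
    }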
+ * + * Starting from an 8-byte aligned base, the compiler has either + * generated: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: NOP // To be patched to MOV X9, LR + * addr+12: NOP // To be patched to BL <caller> + * + * Or: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: BTI C + * addr+12: NOP // To be patched to MOV X9, LR + * addr+16: NOP // To be patched to BL <caller> + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL <caller>`, which is at either addr+12 or addr+16 depending on + * whether there is a BTI. + */ + + if (!IS_ALIGNED(addr, sizeof(unsigned long))) { + WARN_RATELIMIT(1, "Misaligned patch-site %pS\n", + (void *)(addr + 8)); + return 0; + } + + /* Skip the NOPs placed before the function entry point */ + addr += 2 * AARCH64_INSN_SIZE; + + /* Skip any BTI */ + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) { + u32 insn = le32_to_cpu(*(__le32 *)addr); + + if (aarch64_insn_is_bti(insn)) { + addr += AARCH64_INSN_SIZE; + } else if (insn != aarch64_insn_gen_nop()) { + WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n", + (void *)addr, insn); + } + } + + /* Skip the first NOP after function entry */ + addr += AARCH64_INSN_SIZE; + + return addr; +} + /* * Replace a single instruction, which may be a branch or NOP. * If @validate == true, a replaced instruction is checked against 'old'. @@ -98,6 +181,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned long pc; u32 new; + /* + * When using CALL_OPS, the function to call is associated with the + * call site, and we don't have a global function pointer to update. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return 0; + pc = (unsigned long)ftrace_call; new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, AARCH64_INSN_BRANCH_LINK); @@ -176,6 +266,44 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, return true; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec) +{ + const struct ftrace_ops *ops = NULL; + + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + WARN_ON_ONCE(!ops); + } + + if (!ops) + ops = &ftrace_list_ops; + + return ops; +} + +static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, + const struct ftrace_ops *ops) +{ + unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8); + return aarch64_insn_write_literal_u64((void *)literal, + (unsigned long)ops); +} + +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, &ftrace_nop_ops); +} + +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); +} +#else +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; } +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; } +#endif + /* * Turn on the call to ftrace_caller() in instrumented function */ @@ -183,6 +311,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; u32 old, new; + int ret; + + ret = ftrace_rec_update_ops(rec); + if (ret) + return ret; if (!ftrace_find_callable_addr(rec, NULL, &addr)) return -EINVAL; @@ -193,6 +326,19 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(pc, old, new, true); } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +int ftrace_modify_call(struct dyn_ftrace *rec, 
unsigned long old_addr, + unsigned long addr) +{ + if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller)) + return -EINVAL; + if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller)) + return -EINVAL; + + return ftrace_rec_update_ops(rec); +} +#endif + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS /* * The compiler has inserted two NOPs before the regular function prologue. @@ -209,7 +355,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * | NOP | MOV X9, LR | MOV X9, LR | * | NOP | NOP | BL <entry> | * - * The LR value will be recovered by ftrace_regs_entry, and restored into LR + * The LR value will be recovered by ftrace_caller, and restored into LR * before returning to the regular function prologue. When a function is not * being traced, the MOV is not harmful given x9 is not live per the AAPCS. * @@ -220,6 +366,11 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { unsigned long pc = rec->ip - AARCH64_INSN_SIZE; u32 old, new; + int ret; + + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9, @@ -237,9 +388,14 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, { unsigned long pc = rec->ip; u32 old = 0, new; + int ret; new = aarch64_insn_gen_nop(); + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; + /* * When using mcount, callsites in modules may have been initalized to * call an arbitrary module PLT (which redirects to the _mcount stub) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 952e17bd1c0b..b98970907226 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -70,13 +70,14 @@ __EFI_PE_HEADER - __INIT + .section ".idmap.text","a" /* * The following callee saved general purpose registers are used on the * primary lowlevel boot path: * * Register Scope Purpose + * x19 primary_entry() .. start_kernel() whether we entered with the MMU on * x20 primary_entry() .. __primary_switch() CPU boot mode * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0 * x22 create_idmap() .. start_kernel() ID map VA of the DT blob @@ -86,10 +87,23 @@ * x28 create_idmap() callee preserved temp register */ SYM_CODE_START(primary_entry) + bl record_mmu_state bl preserve_boot_args + bl create_idmap + + /* + * If we entered with the MMU and caches on, clean the ID mapped part + * of the primary boot code to the PoC so we can safely execute it with + * the MMU off. + */ + cbz x19, 0f + adrp x0, __idmap_text_start + adr_l x1, __idmap_text_end + adr_l x2, dcache_clean_poc + blr x2 +0: mov x0, x19 bl init_kernel_el // w0=cpu_boot_mode mov x20, x0 - bl create_idmap /* * The following calls CPU setup code, see arch/arm64/mm/proc.S for @@ -109,6 +123,40 @@ SYM_CODE_START(primary_entry) b __primary_switch SYM_CODE_END(primary_entry) + __INIT +SYM_CODE_START_LOCAL(record_mmu_state) + mrs x19, CurrentEL + cmp x19, #CurrentEL_EL2 + mrs x19, sctlr_el1 + b.ne 0f + mrs x19, sctlr_el2 +0: +CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f ) +CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f ) + tst x19, #SCTLR_ELx_C // Z := (C == 0) + and x19, x19, #SCTLR_ELx_M // isolate M bit + csel x19, xzr, x19, eq // clear x19 if Z + ret + + /* + * Set the correct endianness early so all memory accesses issued + * before init_kernel_el() occur in the correct byte order. Note that + * this means the MMU must be disabled, or the active ID map will end + * up getting interpreted with the wrong byte order. 
+ */ +1: eor x19, x19, #SCTLR_ELx_EE + bic x19, x19, #SCTLR_ELx_M + b.ne 2f + pre_disable_mmu_workaround + msr sctlr_el2, x19 + b 3f + pre_disable_mmu_workaround +2: msr sctlr_el1, x19 +3: isb + mov x19, xzr + ret +SYM_CODE_END(record_mmu_state) + /* * Preserve the arguments passed by the bootloader in x0 .. x3 */ @@ -119,11 +167,14 @@ SYM_CODE_START_LOCAL(preserve_boot_args) stp x21, x1, [x0] // x0 .. x3 at kernel entry stp x2, x3, [x0, #16] + cbnz x19, 0f // skip cache invalidation if MMU is on dmb sy // needed before dc ivac with // MMU off add x1, x0, #0x20 // 4 x 8 bytes b dcache_inval_poc // tail call +0: str_l x19, mmu_enabled_at_boot, x0 + ret SYM_CODE_END(preserve_boot_args) SYM_FUNC_START_LOCAL(clear_page_tables) @@ -360,12 +411,13 @@ SYM_FUNC_START_LOCAL(create_idmap) * accesses (MMU disabled), invalidate those tables again to * remove any speculatively loaded cache lines. */ + cbnz x19, 0f // skip cache invalidation if MMU is on dmb sy adrp x0, init_idmap_pg_dir adrp x1, init_idmap_pg_end bl dcache_inval_poc - ret x28 +0: ret x28 SYM_FUNC_END(create_idmap) SYM_FUNC_START_LOCAL(create_kernel_mapping) @@ -404,7 +456,7 @@ SYM_FUNC_END(create_kernel_mapping) stp xzr, xzr, [sp, #S_STACKFRAME] add x29, sp, #S_STACKFRAME - scs_load \tsk + scs_load_current adr_l \tmp1, __per_cpu_offset ldr w\tmp2, [\tsk, #TSK_TI_CPU] @@ -476,7 +528,7 @@ SYM_FUNC_END(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","awx" + .section ".idmap.text","a" /* * Starting from EL2 or EL1, configure the CPU to execute at the highest @@ -489,14 +541,17 @@ SYM_FUNC_END(__primary_switched) * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if * booted in EL1 or EL2 respectively, with the top 32 bits containing * potential context flags. These flags are *not* stored in __boot_cpu_mode. + * + * x0: whether we are being called from the primary boot path with the MMU on */ SYM_FUNC_START(init_kernel_el) - mrs x0, CurrentEL - cmp x0, #CurrentEL_EL2 + mrs x1, CurrentEL + cmp x1, #CurrentEL_EL2 b.eq init_el2 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) mov_q x0, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround msr sctlr_el1, x0 isb mov_q x0, INIT_PSTATE_EL1 @@ -506,6 +561,15 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) eret SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) + msr elr_el2, lr + + // clean all HYP code to the PoC if we booted at EL2 with the MMU on + cbz x0, 0f + adrp x0, __hyp_idmap_text_start + adr_l x1, __hyp_text_end + adr_l x2, dcache_clean_poc + blr x2 +0: mov_q x0, HCR_HOST_NVHE_FLAGS msr hcr_el2, x0 isb @@ -529,38 +593,27 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) cbz x0, 1f /* Set a sane SCTLR_EL1, the VHE way */ + pre_disable_mmu_workaround msr_s SYS_SCTLR_EL12, x1 mov x2, #BOOT_CPU_FLAG_E2H b 2f 1: + pre_disable_mmu_workaround msr sctlr_el1, x1 mov x2, xzr 2: - msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 orr x0, x0, x2 eret SYM_FUNC_END(init_kernel_el) -/* - * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed - * in w0. See arch/arm64/include/asm/virt.h for more info. - */ -SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) - adr_l x1, __boot_cpu_mode - cmp w0, #BOOT_CPU_MODE_EL2 - b.ne 1f - add x1, x1, #4 -1: str w0, [x1] // Save CPU boot mode - ret -SYM_FUNC_END(set_cpu_boot_mode_flag) - /* * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. 
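record_mmu_state above distils the new boot contract: x19 is reported non-zero only if the MMU and the data cache are both enabled at the entry exception level, after the endianness fixup, since only then may the boot path rely on the firmware's cacheable 1:1 mapping and skip the cache invalidations. The predicate, restated as a C sketch (bit positions per the architectural SCTLR_ELx layout; not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    #define SCTLR_ELx_M (1UL << 0)  /* MMU enable */
    #define SCTLR_ELx_C (1UL << 2)  /* data cache enable */

    /* Mirrors the tst/and/csel sequence: report "MMU on" only when both
     * translation and cacheability can be trusted. */
    static bool entered_with_mmu_on(uint64_t sctlr)
    {
        return (sctlr & SCTLR_ELx_C) && (sctlr & SCTLR_ELx_M);
    }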
*/ SYM_FUNC_START(secondary_holding_pen) + mov x0, xzr bl init_kernel_el // w0=cpu_boot_mode mrs x2, mpidr_el1 mov_q x1, MPIDR_HWID_BITMASK @@ -578,6 +631,7 @@ SYM_FUNC_END(secondary_holding_pen) * be used where CPUs are brought online dynamically by the kernel. */ SYM_FUNC_START(secondary_entry) + mov x0, xzr bl init_kernel_el // w0=cpu_boot_mode b secondary_startup SYM_FUNC_END(secondary_entry) @@ -587,7 +641,6 @@ SYM_FUNC_START_LOCAL(secondary_startup) * Common entry point for secondary CPUs. */ mov x20, x0 // preserve boot mode - bl finalise_el2 bl __cpu_secondary_check52bitva #if VA_BITS > 48 ldr_l x0, vabits_actual @@ -600,9 +653,14 @@ SYM_FUNC_START_LOCAL(secondary_startup) br x8 SYM_FUNC_END(secondary_startup) + .text SYM_FUNC_START_LOCAL(__secondary_switched) mov x0, x20 bl set_cpu_boot_mode_flag + + mov x0, x20 + bl finalise_el2 + str_l xzr, __early_cpu_boot_status, x3 adr_l x5, vectors msr vbar_el1, x5 @@ -629,6 +687,19 @@ SYM_FUNC_START_LOCAL(__secondary_too_slow) SYM_FUNC_END(__secondary_too_slow) /* + * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed + * in w0. See arch/arm64/include/asm/virt.h for more info. + */ +SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) + adr_l x1, __boot_cpu_mode + cmp w0, #BOOT_CPU_MODE_EL2 + b.ne 1f + add x1, x1, #4 +1: str w0, [x1] // Save CPU boot mode + ret +SYM_FUNC_END(set_cpu_boot_mode_flag) + +/* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. * @@ -659,6 +730,7 @@ SYM_FUNC_END(__secondary_too_slow) * Checks if the selected granule size is supported by the CPU. * If it isn't, park the CPU */ + .section ".idmap.text","a" SYM_FUNC_START(__enable_mmu) mrs x3, ID_AA64MMFR0_EL1 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4 diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 2ee18c860f2a..111ff33d93ee 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -132,6 +132,13 @@ SYM_CODE_START_LOCAL(__finalise_el2) orr x0, x0, SMCR_ELx_FA64_MASK .Lskip_sme_fa64: + // ZT0 available? + mrs_s x1, SYS_ID_AA64SMFR0_EL1 + __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0 +.Linit_sme_zt0: + orr x0, x0, SMCR_ELx_EZT0_MASK +.Lskip_sme_zt0: + orr x0, x0, #SMCR_ELx_LEN_MASK // Enable full SME vector msr_s SYS_SMCR_EL2, x0 // length for EL1. diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 95133765ed29..d833d78a7f31 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -131,6 +131,7 @@ static const struct ftr_set_desc smfr0 __initconst = { .name = "id_aa64smfr0", .override = &id_aa64smfr0_override, .fields = { + FIELD("smever", ID_AA64SMFR0_EL1_SMEver_SHIFT, NULL), /* FA64 is a one bit field... :-/ */ { "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, }, {} diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index d0e9bb5c91fc..8309197c0ebd 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -10,7 +10,7 @@ #error This file should only be included in vmlinux.lds.S #endif -PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); +PROVIDE(__efistub_primary_entry = primary_entry); /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to @@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); * linked at. 
The routines below are all implemented in assembler in a * position independent manner */ -PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc); +PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou); PROVIDE(__efistub__text = _text); PROVIDE(__efistub__end = _end); +PROVIDE(__efistub___inittext_end = __inittext_end); PROVIDE(__efistub__edata = _edata); PROVIDE(__efistub_screen_info = screen_info); PROVIDE(__efistub__ctype = _ctype); @@ -67,9 +68,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); -/* Static key checked in pmr_sync(). */ #ifdef CONFIG_ARM64_PSEUDO_NMI -KVM_NVHE_ALIAS(gic_pmr_sync); /* Static key checked in GIC_PRIO_IRQOFF. */ KVM_NVHE_ALIAS(gic_nonsecure_priorities); #endif diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c index 1b3da02d5b74..a1fe4b4ff591 100644 --- a/arch/arm64/kernel/patch-scs.c +++ b/arch/arm64/kernel/patch-scs.c @@ -130,7 +130,8 @@ struct eh_frame { static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, bool fde_has_augmentation_data, - int code_alignment_factor) + int code_alignment_factor, + bool dry_run) { int size = frame->size - offsetof(struct eh_frame, opcodes) + 4; u64 loc = (u64)offset_to_ptr(&frame->initial_loc); @@ -184,7 +185,8 @@ static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, break; case DW_CFA_negate_ra_state: - scs_patch_loc(loc - 4); + if (!dry_run) + scs_patch_loc(loc - 4); break; case 0x40 ... 0x7f: @@ -235,9 +237,12 @@ int noinstr scs_patch(const u8 eh_frame[], int size) } else { ret = scs_handle_fde_frame(frame, fde_has_augmentation_data, - code_alignment_factor); + code_alignment_factor, + true); if (ret) return ret; + scs_handle_fde_frame(frame, fde_has_augmentation_data, + code_alignment_factor, false); } p += sizeof(frame->size) + frame->size; diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c index 33e0fabc0b79..b4835f6d594b 100644 --- a/arch/arm64/kernel/patching.c +++ b/arch/arm64/kernel/patching.c @@ -88,6 +88,23 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn) return __aarch64_insn_write(addr, cpu_to_le32(insn)); } +noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val) +{ + u64 *waddr; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &val, sizeof(val)); + + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) { u32 *tp = addr; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f35d059a9a36..70b91a8c6bb3 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -387,10 +387,6 @@ int __init arch_populate_kprobe_blacklist(void) (unsigned long)__irqentry_text_end); if (ret) return ret; - ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start, - (unsigned long)__idmap_text_end); - if (ret) - return ret; ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start, (unsigned long)__hyp_text_end); if (ret || is_kernel_in_hyp_mode()) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 269ac1c25ae2..71d59b5abede 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -307,27 +307,28 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * In the unlikely event 
that we create a new thread with ZA - * enabled we should retain the ZA state so duplicate it here. - * This may be shortly freed if we exec() or if CLONE_SETTLS - * but it's simpler to do it here. To avoid confusing the rest - * of the code ensure that we have a sve_state allocated - * whenever za_state is allocated. + * enabled we should retain the ZA and ZT state so duplicate + * it here. This may be shortly freed if we exec() or if + * CLONE_SETTLS but it's simpler to do it here. To avoid + * confusing the rest of the code ensure that we have a + * sve_state allocated whenever sme_state is allocated. */ if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), GFP_KERNEL); if (!dst->thread.sve_state) return -ENOMEM; - dst->thread.za_state = kmemdup(src->thread.za_state, - za_state_size(src), - GFP_KERNEL); - if (!dst->thread.za_state) { + + dst->thread.sme_state = kmemdup(src->thread.sme_state, + sme_state_size(src), + GFP_KERNEL); + if (!dst->thread.sme_state) { kfree(dst->thread.sve_state); dst->thread.sve_state = NULL; return -ENOMEM; } } else { - dst->thread.za_state = NULL; + dst->thread.sme_state = NULL; clear_tsk_thread_flag(dst, TIF_SME); } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 0c321ad23cd3..d7f4f0d1ae12 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -683,7 +683,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, unsigned long tls[2]; tls[0] = target->thread.uw.tp_value; - if (system_supports_sme()) + if (system_supports_tpidr2()) tls[1] = target->thread.tpidr2_el0; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); @@ -691,7 +691,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, return ret; target->thread.uw.tp_value = tls[0]; - if (system_supports_sme()) + if (system_supports_tpidr2()) target->thread.tpidr2_el0 = tls[1]; return ret; @@ -1045,7 +1045,7 @@ static int za_get(struct task_struct *target, if (thread_za_enabled(&target->thread)) { start = end; end = ZA_PT_SIZE(vq); - membuf_write(&to, target->thread.za_state, end - start); + membuf_write(&to, target->thread.sme_state, end - start); } /* Zero any trailing padding */ @@ -1099,7 +1099,7 @@ static int za_set(struct task_struct *target, /* Allocate/reinit ZA storage */ sme_alloc(target); - if (!target->thread.za_state) { + if (!target->thread.sme_state) { ret = -ENOMEM; goto out; } @@ -1124,7 +1124,7 @@ static int za_set(struct task_struct *target, start = ZA_PT_ZA_OFFSET; end = ZA_PT_SIZE(vq); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.za_state, + target->thread.sme_state, start, end); if (ret) goto out; @@ -1138,6 +1138,51 @@ out: return ret; } +static int zt_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme2()) + return -EINVAL; + + /* + * If PSTATE.ZA is not set then ZT will be zeroed when it is + * enabled so report the current register value as zero. 
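For the NT_ARM_ZT regset introduced below: it exposes exactly one 512-bit register, so reads and writes transfer a fixed 64 bytes, and a write implicitly enables ZA. A hedged sketch of a debugger-side read (assumes a stopped tracee and the NT_ARM_ZT constant from the updated UAPI headers; the fallback value here is an assumption, and error handling is trimmed):

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #ifndef NT_ARM_ZT
    #define NT_ARM_ZT 0x40d     /* assumed value from <linux/elf.h> */
    #endif

    static int dump_zt0(pid_t pid)
    {
        uint8_t zt0[64];        /* ZT_SIG_REG_BYTES: 512 bits */
        struct iovec iov = { .iov_base = zt0, .iov_len = sizeof(zt0) };

        /* Fails with EINVAL when the CPU has no SME2. */
        if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZT, &iov) != 0)
            return -1;

        for (size_t i = 0; i < iov.iov_len; i++)
            printf("%02x", zt0[i]);
        putchar('\n');
        return 0;
    }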
+ */ + if (thread_za_enabled(&target->thread)) + membuf_write(&to, thread_zt_state(&target->thread), + ZT_SIG_REG_BYTES); + else + membuf_zero(&to, ZT_SIG_REG_BYTES); + + return 0; +} + +static int zt_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!system_supports_sme2()) + return -EINVAL; + + if (!thread_za_enabled(&target->thread)) { + sme_alloc(target); + if (!target->thread.sme_state) + return -ENOMEM; + } + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + thread_zt_state(&target->thread), + 0, ZT_SIG_REG_BYTES); + if (ret == 0) + target->thread.svcr |= SVCR_ZA_MASK; + + return ret; +} + #endif /* CONFIG_ARM64_SME */ #ifdef CONFIG_ARM64_PTR_AUTH @@ -1360,6 +1405,7 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_SME REGSET_SSVE, REGSET_ZA, + REGSET_ZT, #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, @@ -1467,6 +1513,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = za_get, .set = za_set, }, + [REGSET_ZT] = { /* SME ZT */ + .core_note_type = NT_ARM_ZT, + .n = 1, + .size = ZT_SIG_REG_BYTES, + .align = sizeof(u64), + .regset_get = zt_get, + .set = zt_set, + }, #endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 12cfe9d0d3fa..b8ec7b3ac9cb 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -58,6 +58,7 @@ static int num_standard_resources; static struct resource *standard_resources; phys_addr_t __fdt_pointer __initdata; +u64 mmu_enabled_at_boot __initdata; /* * Standard memory resources @@ -332,8 +333,12 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) xen_early_init(); efi_init(); - if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0) - pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); + if (!efi_enabled(EFI_BOOT)) { + if ((u64)_text % MIN_KIMG_ALIGN) + pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); + WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND, + FW_BUG "Booted with MMU enabled!"); + } arm64_memblock_init(); @@ -442,3 +447,11 @@ static int __init register_arm64_panic_block(void) return 0; } device_initcall(register_arm64_panic_block); + +static int __init check_mmu_enabled_at_boot(void) +{ + if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot) + panic("Non-EFI boot detected with MMU and caches enabled"); + return 0; +} +device_initcall_sync(check_mmu_enabled_at_boot); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index be279fd48248..06a02707f488 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -56,7 +56,9 @@ struct rt_sigframe_user_layout { unsigned long fpsimd_offset; unsigned long esr_offset; unsigned long sve_offset; + unsigned long tpidr2_offset; unsigned long za_offset; + unsigned long zt_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -168,6 +170,19 @@ static void __user *apply_user_offset( return base + offset; } +struct user_ctxs { + struct fpsimd_context __user *fpsimd; + u32 fpsimd_size; + struct sve_context __user *sve; + u32 sve_size; + struct tpidr2_context __user *tpidr2; + u32 tpidr2_size; + struct za_context __user *za; + u32 za_size; + struct zt_context __user *zt; + u32 zt_size; +}; + static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) { struct user_fpsimd_state const *fpsimd = @@ -186,25 +201,20 @@ static int 
preserve_fpsimd_context(struct fpsimd_context __user *ctx) return err ? -EFAULT : 0; } -static int restore_fpsimd_context(struct fpsimd_context __user *ctx) +static int restore_fpsimd_context(struct user_ctxs *user) { struct user_fpsimd_state fpsimd; - __u32 magic, size; int err = 0; - /* check the magic/size information */ - __get_user_error(magic, &ctx->head.magic, err); - __get_user_error(size, &ctx->head.size, err); - if (err) - return -EFAULT; - if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context)) + /* check the size information */ + if (user->fpsimd_size != sizeof(struct fpsimd_context)) return -EINVAL; /* copy the FP and status/control registers */ - err = __copy_from_user(fpsimd.vregs, ctx->vregs, + err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs), sizeof(fpsimd.vregs)); - __get_user_error(fpsimd.fpsr, &ctx->fpsr, err); - __get_user_error(fpsimd.fpcr, &ctx->fpcr, err); + __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err); + __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err); clear_thread_flag(TIF_SVE); current->thread.fp_type = FP_STATE_FPSIMD; @@ -217,12 +227,6 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) } -struct user_ctxs { - struct fpsimd_context __user *fpsimd; - struct sve_context __user *sve; - struct za_context __user *za; -}; - #ifdef CONFIG_ARM64_SVE static int preserve_sve_context(struct sve_context __user *ctx) @@ -267,15 +271,20 @@ static int preserve_sve_context(struct sve_context __user *ctx) static int restore_sve_fpsimd_context(struct user_ctxs *user) { - int err; + int err = 0; unsigned int vl, vq; struct user_fpsimd_state fpsimd; - struct sve_context sve; + u16 user_vl, flags; - if (__copy_from_user(&sve, user->sve, sizeof(sve))) - return -EFAULT; + if (user->sve_size < sizeof(*user->sve)) + return -EINVAL; + + __get_user_error(user_vl, &(user->sve->vl), err); + __get_user_error(flags, &(user->sve->flags), err); + if (err) + return err; - if (sve.flags & SVE_SIG_FLAG_SM) { + if (flags & SVE_SIG_FLAG_SM) { if (!system_supports_sme()) return -EINVAL; @@ -292,19 +301,19 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) vl = task_get_sve_vl(current); } - if (sve.vl != vl) + if (user_vl != vl) return -EINVAL; - if (sve.head.size <= sizeof(*user->sve)) { + if (user->sve_size == sizeof(*user->sve)) { clear_thread_flag(TIF_SVE); current->thread.svcr &= ~SVCR_SM_MASK; current->thread.fp_type = FP_STATE_FPSIMD; goto fpsimd_only; } - vq = sve_vq_from_vl(sve.vl); + vq = sve_vq_from_vl(vl); - if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq)) + if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) return -EINVAL; /* @@ -330,7 +339,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (err) return -EFAULT; - if (sve.flags & SVE_SIG_FLAG_SM) + if (flags & SVE_SIG_FLAG_SM) current->thread.svcr |= SVCR_SM_MASK; else set_thread_flag(TIF_SVE); @@ -366,6 +375,34 @@ extern int preserve_sve_context(void __user *ctx); #ifdef CONFIG_ARM64_SME +static int preserve_tpidr2_context(struct tpidr2_context __user *ctx) +{ + int err = 0; + + current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); + + __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(*ctx), &ctx->head.size, err); + __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err); + + return err; +} + +static int restore_tpidr2_context(struct user_ctxs *user) +{ + u64 tpidr2_el0; + int err = 0; + + if (user->tpidr2_size != sizeof(*user->tpidr2)) + return -EINVAL; + + __get_user_error(tpidr2_el0, 
&user->tpidr2->tpidr2, err); + if (!err) + current->thread.tpidr2_el0 = tpidr2_el0; + + return err; +} + static int preserve_za_context(struct za_context __user *ctx) { int err = 0; @@ -394,7 +431,7 @@ static int preserve_za_context(struct za_context __user *ctx) * fpsimd_signal_preserve_current_state(). */ err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, - current->thread.za_state, + current->thread.sme_state, ZA_SIG_REGS_SIZE(vq)); } @@ -403,29 +440,33 @@ static int preserve_za_context(struct za_context __user *ctx) static int restore_za_context(struct user_ctxs *user) { - int err; + int err = 0; unsigned int vq; - struct za_context za; + u16 user_vl; - if (__copy_from_user(&za, user->za, sizeof(za))) - return -EFAULT; + if (user->za_size < sizeof(*user->za)) + return -EINVAL; + + __get_user_error(user_vl, &(user->za->vl), err); + if (err) + return err; - if (za.vl != task_get_sme_vl(current)) + if (user_vl != task_get_sme_vl(current)) return -EINVAL; - if (za.head.size <= sizeof(*user->za)) { + if (user->za_size == sizeof(*user->za)) { current->thread.svcr &= ~SVCR_ZA_MASK; return 0; } - vq = sve_vq_from_vl(za.vl); + vq = sve_vq_from_vl(user_vl); - if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq)) + if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq)) return -EINVAL; /* * Careful: we are about __copy_from_user() directly into - * thread.za_state with preemption enabled, so protection is + * thread.sme_state with preemption enabled, so protection is * needed to prevent a racing context switch from writing stale * registers back over the new data. */ @@ -434,13 +475,13 @@ static int restore_za_context(struct user_ctxs *user) /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ sme_alloc(current); - if (!current->thread.za_state) { + if (!current->thread.sme_state) { current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); return -ENOMEM; } - err = __copy_from_user(current->thread.za_state, + err = __copy_from_user(current->thread.sme_state, (char __user const *)user->za + ZA_SIG_REGS_OFFSET, ZA_SIG_REGS_SIZE(vq)); @@ -452,11 +493,83 @@ static int restore_za_context(struct user_ctxs *user) return 0; } + +static int preserve_zt_context(struct zt_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + + if (WARN_ON(!thread_za_enabled(¤t->thread))) + return -EINVAL; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZT_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16), + &ctx->head.size, err); + __put_user_error(1, &ctx->nregs, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + /* + * This assumes that the ZT state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET, + thread_zt_state(¤t->thread), + ZT_SIG_REGS_SIZE(1)); + + return err ? 
-EFAULT : 0; +} + +static int restore_zt_context(struct user_ctxs *user) +{ + int err; + u16 nregs; + + /* ZA must be restored first for this check to be valid */ + if (!thread_za_enabled(¤t->thread)) + return -EINVAL; + + if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1)) + return -EINVAL; + + if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs))) + return -EFAULT; + + if (nregs != 1) + return -EINVAL; + + /* + * Careful: we are about __copy_from_user() directly into + * thread.zt_state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch ZT in thread state */ + + err = __copy_from_user(thread_zt_state(¤t->thread), + (char __user const *)user->zt + + ZT_SIG_REGS_OFFSET, + ZT_SIG_REGS_SIZE(1)); + if (err) + return -EFAULT; + + return 0; +} + #else /* ! CONFIG_ARM64_SME */ /* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_tpidr2_context(void __user *ctx); +extern int restore_tpidr2_context(struct user_ctxs *user); extern int preserve_za_context(void __user *ctx); extern int restore_za_context(struct user_ctxs *user); +extern int preserve_zt_context(void __user *ctx); +extern int restore_zt_context(struct user_ctxs *user); #endif /* ! CONFIG_ARM64_SME */ @@ -473,7 +586,9 @@ static int parse_user_sigframe(struct user_ctxs *user, user->fpsimd = NULL; user->sve = NULL; + user->tpidr2 = NULL; user->za = NULL; + user->zt = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -516,10 +631,8 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->fpsimd) goto invalid; - if (size < sizeof(*user->fpsimd)) - goto invalid; - user->fpsimd = (struct fpsimd_context __user *)head; + user->fpsimd_size = size; break; case ESR_MAGIC: @@ -533,10 +646,19 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->sve) goto invalid; - if (size < sizeof(*user->sve)) + user->sve = (struct sve_context __user *)head; + user->sve_size = size; + break; + + case TPIDR2_MAGIC: + if (!system_supports_sme()) goto invalid; - user->sve = (struct sve_context __user *)head; + if (user->tpidr2) + goto invalid; + + user->tpidr2 = (struct tpidr2_context __user *)head; + user->tpidr2_size = size; break; case ZA_MAGIC: @@ -546,10 +668,19 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->za) goto invalid; - if (size < sizeof(*user->za)) + user->za = (struct za_context __user *)head; + user->za_size = size; + break; + + case ZT_MAGIC: + if (!system_supports_sme2()) goto invalid; - user->za = (struct za_context __user *)head; + if (user->zt) + goto invalid; + + user->zt = (struct zt_context __user *)head; + user->zt_size = size; break; case EXTRA_MAGIC: @@ -668,12 +799,18 @@ static int restore_sigframe(struct pt_regs *regs, if (user.sve) err = restore_sve_fpsimd_context(&user); else - err = restore_fpsimd_context(user.fpsimd); + err = restore_fpsimd_context(&user); } + if (err == 0 && system_supports_sme() && user.tpidr2) + err = restore_tpidr2_context(&user); + if (err == 0 && system_supports_sme() && user.za) err = restore_za_context(&user); + if (err == 0 && system_supports_sme2() && user.zt) + err = restore_zt_context(&user); + return err; } @@ -765,6 +902,11 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, else vl = task_get_sme_vl(current); + err = sigframe_alloc(user, &user->tpidr2_offset, + sizeof(struct tpidr2_context)); 
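All of the records handled here (fpsimd, sve, tpidr2, za, zt) share the extensible sigframe convention: each begins with a { magic, size } header whose size covers the whole record, and parse_user_sigframe() now records both the pointer and the size so the restore paths validate the size once instead of re-reading headers from userspace. A hedged sketch of the equivalent userspace walk over a frame's __reserved area (the struct mirrors the UAPI struct _aarch64_ctx; illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    struct aarch64_ctx {        /* mirrors struct _aarch64_ctx */
        uint32_t magic;
        uint32_t size;
    };

    /* Find a record by magic; a zero header terminates the list. */
    static struct aarch64_ctx *find_record(void *reserved, size_t avail,
                                           uint32_t magic)
    {
        uint8_t *p = reserved;
        uint8_t *end = p + avail;

        while ((size_t)(end - p) >= sizeof(struct aarch64_ctx)) {
            struct aarch64_ctx *h = (struct aarch64_ctx *)p;

            if (h->magic == 0)
                break;              /* terminator */
            if (h->size < sizeof(*h) || h->size > (size_t)(end - p))
                break;              /* malformed frame */
            if (h->magic == magic)
                return h;
            p += h->size;
        }
        return NULL;
    }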
+ if (err) + return err; + if (thread_za_enabled(¤t->thread)) vq = sve_vq_from_vl(vl); @@ -774,6 +916,15 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, return err; } + if (system_supports_sme2()) { + if (add_all || thread_za_enabled(¤t->thread)) { + err = sigframe_alloc(user, &user->zt_offset, + ZT_SIG_CONTEXT_SIZE(1)); + if (err) + return err; + } + } + return sigframe_alloc_end(user); } @@ -822,6 +973,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_sve_context(sve_ctx); } + /* TPIDR2 if supported */ + if (system_supports_sme() && err == 0) { + struct tpidr2_context __user *tpidr2_ctx = + apply_user_offset(user, user->tpidr2_offset); + err |= preserve_tpidr2_context(tpidr2_ctx); + } + /* ZA state if present */ if (system_supports_sme() && err == 0 && user->za_offset) { struct za_context __user *za_ctx = @@ -829,6 +987,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_za_context(za_ctx); } + /* ZT state if present */ + if (system_supports_sme2() && err == 0 && user->zt_offset) { + struct zt_context __user *zt_ctx = + apply_user_offset(user, user->zt_offset); + err |= preserve_zt_context(zt_ctx); + } + if (err == 0 && user->extra_offset) { char __user *sfp = (char __user *)user->sigframe; char __user *userp = diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 97c9de57725d..2ae7cff1953a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -99,8 +99,9 @@ SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) + mov x0, xzr bl init_kernel_el - bl finalise_el2 + mov x19, x0 // preserve boot mode #if VA_BITS > 48 ldr_l x0, vabits_actual #endif @@ -116,6 +117,9 @@ SYM_CODE_END(cpu_resume) .popsection SYM_FUNC_START(_cpu_resume) + mov x0, x19 + bl finalise_el2 + mrs x1, mpidr_el1 adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index a5de47e3df2b..da84cf855c44 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -173,12 +173,8 @@ static inline void fp_user_discard(void) * register state to track, if this changes the KVM code will * need updating. */ - if (system_supports_sme() && test_thread_flag(TIF_SME)) { - u64 svcr = read_sysreg_s(SYS_SVCR); - - if (svcr & SVCR_SM_MASK) - sme_smstop_sm(); - } + if (system_supports_sme()) + sme_smstop_sm(); if (!system_supports_sve()) return; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 87f42eb1c950..a6dd3e90755c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -163,10 +163,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) if (!bad) p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); - else { - p += sprintf(p, "bad PC value"); - break; - } + else + p += sprintf(p, i == 0 ? "(????????) " : "???????? 
"); } printk("%sCode: %s\n", lvl, str); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 2777214cbf1a..b9202c2ee18e 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -93,6 +93,7 @@ jiffies = jiffies_64; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ + ALIGN_FUNCTION(); \ __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ __hibernate_exit_text_end = .; @@ -102,6 +103,7 @@ jiffies = jiffies_64; #ifdef CONFIG_KEXEC_CORE #define KEXEC_TEXT \ + ALIGN_FUNCTION(); \ __relocate_new_kernel_start = .; \ *(.kexec_relocate.text) \ __relocate_new_kernel_end = .; @@ -178,7 +180,6 @@ SECTIONS LOCK_TEXT KPROBES_TEXT HYPERVISOR_TEXT - IDMAP_TEXT *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ @@ -205,6 +206,7 @@ SECTIONS TRAMP_TEXT HIBERNATE_TEXT KEXEC_TEXT + IDMAP_TEXT . = ALIGN(PAGE_SIZE); } @@ -354,6 +356,8 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, #ifdef CONFIG_HIBERNATION ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K, "Hibernate exit text is bigger than 4 KiB") +ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit, + "Hibernate exit text does not start with swsusp_arch_suspend_exit") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, @@ -380,4 +384,6 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K, "kexec relocation code is bigger than 4 KiB") ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") +ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel, + "kexec control page does not start with arm64_relocate_new_kernel") #endif diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index fccf9ec01813..55f80fb93925 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -328,7 +328,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * we may need to check if the host state needs to be saved. 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 066fa60b93d2..91410f488090 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -110,7 +110,6 @@ SYM_FUNC_END(cpu_do_suspend)
  *
  * x0: Address of context pointer
  */
-	.pushsection ".idmap.text", "awx"
 SYM_FUNC_START(cpu_do_resume)
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
@@ -166,7 +165,6 @@ alternative_else_nop_endif
 	isb
 	ret
 SYM_FUNC_END(cpu_do_resume)
-	.popsection
 #endif
 
 	.pushsection ".idmap.text", "awx"
diff --git a/arch/arm64/tools/cpucaps
b/arch/arm64/tools/cpucaps index dfeb2c51e257..10dcfa13390a 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -28,7 +28,9 @@ HAS_GENERIC_AUTH HAS_GENERIC_AUTH_ARCH_QARMA3 HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF -HAS_IRQ_PRIO_MASKING +HAS_GIC_CPUIF_SYSREGS +HAS_GIC_PRIO_MASKING +HAS_GIC_PRIO_RELAXED_SYNC HAS_LDAPR HAS_LSE_ATOMICS HAS_NO_FPSIMD @@ -38,7 +40,6 @@ HAS_RAS_EXTN HAS_RNG HAS_SB HAS_STAGE2_FWB -HAS_SYSREG_GIC_CPUIF HAS_TIDCP1 HAS_TLB_RANGE HAS_VIRT_HOST_EXTN @@ -50,6 +51,7 @@ MTE MTE_ASYMM SME SME_FA64 +SME2 SPECTRE_V2 SPECTRE_V3A SPECTRE_V4 diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index c350164a3955..7f27d66a17e1 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -44,6 +44,11 @@ function define_field(reg, field, msb, lsb) { define(reg "_" field "_WIDTH", msb - lsb + 1) } +# Print a field _SIGNED definition for a field +function define_field_sign(reg, field, sign) { + define(reg "_" field "_SIGNED", sign) +} + # Parse a "<msb>[:<lsb>]" string into the global variables @msb and @lsb function parse_bitdef(reg, field, bitdef, _bits) { @@ -233,6 +238,30 @@ END { next } +/^SignedEnum/ { + change_block("Enum<", "Sysreg", "Enum") + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + define_field_sign(reg, field, "true") + + next +} + +/^UnsignedEnum/ { + change_block("Enum<", "Sysreg", "Enum") + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + define_field_sign(reg, field, "false") + + next +} + /^Enum/ { change_block("Enum", "Sysreg", "Enum") expect_fields(3) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 184e58fd5631..94d78acafb67 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -48,26 +48,26 @@ Sysreg ID_PFR0_EL1 3 0 0 1 0 Res0 63:32 -Enum 31:28 RAS +UnsignedEnum 31:28 RAS 0b0000 NI 0b0001 RAS 0b0010 RASv1p1 EndEnum -Enum 27:24 DIT +UnsignedEnum 27:24 DIT 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 AMU +UnsignedEnum 23:20 AMU 0b0000 NI 0b0001 AMUv1 0b0010 AMUv1p1 EndEnum -Enum 19:16 CSV2 +UnsignedEnum 19:16 CSV2 0b0000 UNDISCLOSED 0b0001 IMP 0b0010 CSV2p1 EndEnum -Enum 15:12 State3 +UnsignedEnum 15:12 State3 0b0000 NI 0b0001 IMP EndEnum @@ -76,12 +76,12 @@ Enum 11:8 State2 0b0001 NO_CV 0b0010 CV EndEnum -Enum 7:4 State1 +UnsignedEnum 7:4 State1 0b0000 NI 0b0001 THUMB 0b0010 THUMB2 EndEnum -Enum 3:0 State0 +UnsignedEnum 3:0 State0 0b0000 NI 0b0001 IMP EndEnum @@ -89,12 +89,12 @@ EndSysreg Sysreg ID_PFR1_EL1 3 0 0 1 1 Res0 63:32 -Enum 31:28 GIC +UnsignedEnum 31:28 GIC 0b0000 NI 0b0001 GICv3 0b0010 GICv4p1 EndEnum -Enum 27:24 Virt_frac +UnsignedEnum 27:24 Virt_frac 0b0000 NI 0b0001 IMP EndEnum @@ -103,16 +103,16 @@ Enum 23:20 Sec_frac 0b0001 WALK_DISABLE 0b0010 SECURE_MEMORY EndEnum -Enum 19:16 GenTimer +UnsignedEnum 19:16 GenTimer 0b0000 NI 0b0001 IMP 0b0010 ECV EndEnum -Enum 15:12 Virtualization +UnsignedEnum 15:12 Virtualization 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 MProgMod +UnsignedEnum 11:8 MProgMod 0b0000 NI 0b0001 IMP EndEnum @@ -121,7 +121,7 @@ Enum 7:4 Security 0b0001 EL3 0b0001 NSACR_RFR EndEnum -Enum 3:0 ProgMod +UnsignedEnum 3:0 ProgMod 0b0000 NI 0b0001 IMP EndEnum @@ -129,11 +129,11 @@ EndSysreg Sysreg ID_DFR0_EL1 3 0 0 1 2 Res0 63:32 -Enum 31:28 TraceFilt +UnsignedEnum 31:28 TraceFilt 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 PerfMon +UnsignedEnum 27:24 PerfMon 0b0000 NI 0b0001 PMUv1 0b0010 PMUv2 @@ -192,7 +192,7 @@ Enum 
31:28 InnerShr 0b0001 HW 0b1111 IGNORED EndEnum -Enum 27:24 FCSE +UnsignedEnum 27:24 FCSE 0b0000 NI 0b0001 IMP EndEnum @@ -369,7 +369,7 @@ Enum 27:24 Divide 0b0001 xDIV_T32 0b0010 xDIV_A32 EndEnum -Enum 23:20 Debug +UnsignedEnum 23:20 Debug 0b0000 NI 0b0001 IMP EndEnum @@ -380,19 +380,19 @@ Enum 19:16 Coproc 0b0011 MRRC 0b0100 MRRC2 EndEnum -Enum 15:12 CmpBranch +UnsignedEnum 15:12 CmpBranch 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 BitField +UnsignedEnum 11:8 BitField 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 BitCount +UnsignedEnum 7:4 BitCount 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 Swap +UnsignedEnum 3:0 Swap 0b0000 NI 0b0001 IMP EndEnum @@ -562,33 +562,33 @@ EndSysreg Sysreg ID_ISAR5_EL1 3 0 0 2 5 Res0 63:32 -Enum 31:28 VCMA +UnsignedEnum 31:28 VCMA 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 RDM +UnsignedEnum 27:24 RDM 0b0000 NI 0b0001 IMP EndEnum Res0 23:20 -Enum 19:16 CRC32 +UnsignedEnum 19:16 CRC32 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SHA2 +UnsignedEnum 15:12 SHA2 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 SHA1 +UnsignedEnum 11:8 SHA1 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 IMP 0b0010 VMULL EndEnum -Enum 3:0 SEVL +UnsignedEnum 3:0 SEVL 0b0000 NI 0b0001 IMP EndEnum @@ -596,31 +596,31 @@ EndSysreg Sysreg ID_ISAR6_EL1 3 0 0 2 7 Res0 63:28 -Enum 27:24 I8MM +UnsignedEnum 27:24 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 BF16 +UnsignedEnum 23:20 BF16 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 SPECRES +UnsignedEnum 19:16 SPECRES 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SB +UnsignedEnum 15:12 SB 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 FHM +UnsignedEnum 11:8 FHM 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 DP +UnsignedEnum 7:4 DP 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 JSCVT +UnsignedEnum 3:0 JSCVT 0b0000 NI 0b0001 IMP EndEnum @@ -628,37 +628,37 @@ EndSysreg Sysreg ID_MMFR4_EL1 3 0 0 2 6 Res0 63:32 -Enum 31:28 EVT +UnsignedEnum 31:28 EVT 0b0000 NI 0b0001 NO_TLBIS 0b0010 TLBIS EndEnum -Enum 27:24 CCIDX +UnsignedEnum 27:24 CCIDX 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 LSM +UnsignedEnum 23:20 LSM 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 HPDS +UnsignedEnum 19:16 HPDS 0b0000 NI 0b0001 AA32HPD 0b0010 HPDS2 EndEnum -Enum 15:12 CnP +UnsignedEnum 15:12 CnP 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 XNX +UnsignedEnum 11:8 XNX 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AC2 +UnsignedEnum 7:4 AC2 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 SpecSEI +UnsignedEnum 3:0 SpecSEI 0b0000 NI 0b0001 IMP EndEnum @@ -666,77 +666,77 @@ EndSysreg Sysreg MVFR0_EL1 3 0 0 3 0 Res0 63:32 -Enum 31:28 FPRound +UnsignedEnum 31:28 FPRound 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 FPShVec +UnsignedEnum 27:24 FPShVec 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 FPSqrt +UnsignedEnum 23:20 FPSqrt 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 FPDivide +UnsignedEnum 19:16 FPDivide 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 FPTrap +UnsignedEnum 15:12 FPTrap 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 FPDP +UnsignedEnum 11:8 FPDP 0b0000 NI 0b0001 VFPv2 - 0b0001 VFPv3 + 0b0010 VFPv3 EndEnum -Enum 7:4 FPSP +UnsignedEnum 7:4 FPSP 0b0000 NI 0b0001 VFPv2 - 0b0001 VFPv3 + 0b0010 VFPv3 EndEnum Enum 3:0 SIMDReg 0b0000 NI 0b0001 IMP_16x64 - 0b0001 IMP_32x64 + 0b0010 IMP_32x64 EndEnum EndSysreg Sysreg MVFR1_EL1 3 0 0 3 1 Res0 63:32 -Enum 31:28 SIMDFMAC +UnsignedEnum 31:28 SIMDFMAC 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 FPHP +UnsignedEnum 27:24 FPHP 0b0000 NI 0b0001 FPHP 0b0010 FPHP_CONV 0b0011 FP16 EndEnum -Enum 23:20 SIMDHP +UnsignedEnum 23:20 SIMDHP 0b0000 NI 0b0001 SIMDHP - 0b0001 SIMDHP_FLOAT + 0b0010 SIMDHP_FLOAT EndEnum -Enum 19:16 
SIMDSP +UnsignedEnum 19:16 SIMDSP 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SIMDInt +UnsignedEnum 15:12 SIMDInt 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 SIMDLS +UnsignedEnum 11:8 SIMDLS 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 FPDNaN +UnsignedEnum 7:4 FPDNaN 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 FPFtZ +UnsignedEnum 3:0 FPFtZ 0b0000 NI 0b0001 IMP EndEnum @@ -761,15 +761,15 @@ EndSysreg Sysreg ID_PFR2_EL1 3 0 0 3 4 Res0 63:12 -Enum 11:8 RAS_frac +UnsignedEnum 11:8 RAS_frac 0b0000 NI 0b0001 RASv1p1 EndEnum -Enum 7:4 SSBS +UnsignedEnum 7:4 SSBS 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 CSV3 +UnsignedEnum 3:0 CSV3 0b0000 NI 0b0001 IMP EndEnum @@ -777,7 +777,7 @@ EndSysreg Sysreg ID_DFR1_EL1 3 0 0 3 5 Res0 63:8 -Enum 7:4 HPMN0 +UnsignedEnum 7:4 HPMN0 0b0000 NI 0b0001 IMP EndEnum @@ -790,87 +790,87 @@ EndSysreg Sysreg ID_MMFR5_EL1 3 0 0 3 6 Res0 63:8 -Enum 7:4 nTLBPA +UnsignedEnum 7:4 nTLBPA 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 ETS +UnsignedEnum 3:0 ETS 0b0000 NI 0b0001 IMP EndEnum EndSysreg Sysreg ID_AA64PFR0_EL1 3 0 0 4 0 -Enum 63:60 CSV3 +UnsignedEnum 63:60 CSV3 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 CSV2 +UnsignedEnum 59:56 CSV2 0b0000 NI 0b0001 IMP 0b0010 CSV2_2 0b0011 CSV2_3 EndEnum -Enum 55:52 RME +UnsignedEnum 55:52 RME 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 DIT +UnsignedEnum 51:48 DIT 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 AMU +UnsignedEnum 47:44 AMU 0b0000 NI 0b0001 IMP 0b0010 V1P1 EndEnum -Enum 43:40 MPAM +UnsignedEnum 43:40 MPAM 0b0000 0 0b0001 1 EndEnum -Enum 39:36 SEL2 +UnsignedEnum 39:36 SEL2 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 SVE +UnsignedEnum 35:32 SVE 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 RAS +UnsignedEnum 31:28 RAS 0b0000 NI 0b0001 IMP 0b0010 V1P1 EndEnum -Enum 27:24 GIC +UnsignedEnum 27:24 GIC 0b0000 NI 0b0001 IMP 0b0010 V4P1 EndEnum -Enum 23:20 AdvSIMD +SignedEnum 23:20 AdvSIMD 0b0000 IMP 0b0001 FP16 0b1111 NI EndEnum -Enum 19:16 FP +SignedEnum 19:16 FP 0b0000 IMP 0b0001 FP16 0b1111 NI EndEnum -Enum 15:12 EL3 +UnsignedEnum 15:12 EL3 0b0000 NI 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 11:8 EL2 +UnsignedEnum 11:8 EL2 0b0000 NI 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 7:4 EL1 +UnsignedEnum 7:4 EL1 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 3:0 EL0 +UnsignedEnum 3:0 EL0 0b0001 IMP 0b0010 AARCH32 EndEnum @@ -878,44 +878,45 @@ EndSysreg Sysreg ID_AA64PFR1_EL1 3 0 0 4 1 Res0 63:40 -Enum 39:36 NMI +UnsignedEnum 39:36 NMI 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 CSV2_frac +UnsignedEnum 35:32 CSV2_frac 0b0000 NI 0b0001 CSV2_1p1 0b0010 CSV2_1p2 EndEnum -Enum 31:28 RNDR_trap +UnsignedEnum 31:28 RNDR_trap 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 SME +UnsignedEnum 27:24 SME 0b0000 NI 0b0001 IMP + 0b0010 SME2 EndEnum Res0 23:20 -Enum 19:16 MPAM_frac +UnsignedEnum 19:16 MPAM_frac 0b0000 MINOR_0 0b0001 MINOR_1 EndEnum -Enum 15:12 RAS_frac +UnsignedEnum 15:12 RAS_frac 0b0000 NI 0b0001 RASv1p1 EndEnum -Enum 11:8 MTE +UnsignedEnum 11:8 MTE 0b0000 NI 0b0001 IMP 0b0010 MTE2 0b0011 MTE3 EndEnum -Enum 7:4 SSBS +UnsignedEnum 7:4 SSBS 0b0000 NI 0b0001 IMP 0b0010 SSBS2 EndEnum -Enum 3:0 BT +UnsignedEnum 3:0 BT 0b0000 NI 0b0001 IMP EndEnum @@ -923,45 +924,45 @@ EndSysreg Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Res0 63:60 -Enum 59:56 F64MM +UnsignedEnum 59:56 F64MM 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 F32MM +UnsignedEnum 55:52 F32MM 0b0000 NI 0b0001 IMP EndEnum Res0 51:48 -Enum 47:44 I8MM +UnsignedEnum 47:44 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 SM4 +UnsignedEnum 43:40 SM4 0b0000 NI 0b0001 IMP EndEnum Res0 39:36 -Enum 35:32 SHA3 +UnsignedEnum 35:32 SHA3 0b0000 NI 0b0001 IMP EndEnum Res0 31:24 
-Enum 23:20 BF16 +UnsignedEnum 23:20 BF16 0b0000 NI 0b0001 IMP 0b0010 EBF16 EndEnum -Enum 19:16 BitPerm +UnsignedEnum 19:16 BitPerm 0b0000 NI 0b0001 IMP EndEnum Res0 15:8 -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 IMP 0b0010 PMULL128 EndEnum -Enum 3:0 SVEver +UnsignedEnum 3:0 SVEver 0b0000 IMP 0b0001 SVE2 0b0010 SVE2p1 @@ -969,38 +970,56 @@ EndEnum EndSysreg Sysreg ID_AA64SMFR0_EL1 3 0 0 4 5 -Enum 63 FA64 +UnsignedEnum 63 FA64 0b0 NI 0b1 IMP EndEnum Res0 62:60 -Enum 59:56 SMEver +UnsignedEnum 59:56 SMEver + 0b0000 SME + 0b0001 SME2 + 0b0010 SME2p1 0b0000 IMP EndEnum -Enum 55:52 I16I64 +UnsignedEnum 55:52 I16I64 0b0000 NI 0b1111 IMP EndEnum Res0 51:49 -Enum 48 F64F64 +UnsignedEnum 48 F64F64 0b0 NI 0b1 IMP EndEnum -Res0 47:40 -Enum 39:36 I8I32 +UnsignedEnum 47:44 I16I32 + 0b0000 NI + 0b0101 IMP +EndEnum +UnsignedEnum 43 B16B16 + 0b0 NI + 0b1 IMP +EndEnum +UnsignedEnum 42 F16F16 + 0b0 NI + 0b1 IMP +EndEnum +Res0 41:40 +UnsignedEnum 39:36 I8I32 0b0000 NI 0b1111 IMP EndEnum -Enum 35 F16F32 +UnsignedEnum 35 F16F32 + 0b0 NI + 0b1 IMP +EndEnum +UnsignedEnum 34 B16F32 0b0 NI 0b1 IMP EndEnum -Enum 34 B16F32 +UnsignedEnum 33 BI32I32 0b0 NI 0b1 IMP EndEnum -Res0 33 -Enum 32 F32F32 +UnsignedEnum 32 F32F32 0b0 NI 0b1 IMP EndEnum @@ -1013,7 +1032,7 @@ Enum 63:60 HPMN0 0b0001 DEF EndEnum Res0 59:56 -Enum 55:52 BRBE +UnsignedEnum 55:52 BRBE 0b0000 NI 0b0001 IMP 0b0010 BRBE_V1P1 @@ -1023,19 +1042,19 @@ Enum 51:48 MTPMU 0b0001 IMP 0b1111 NI EndEnum -Enum 47:44 TraceBuffer +UnsignedEnum 47:44 TraceBuffer 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 TraceFilt +UnsignedEnum 43:40 TraceFilt 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 DoubleLock +UnsignedEnum 39:36 DoubleLock 0b0000 IMP 0b1111 NI EndEnum -Enum 35:32 PMSVer +UnsignedEnum 35:32 PMSVer 0b0000 NI 0b0001 IMP 0b0010 V1P1 @@ -1047,7 +1066,7 @@ Res0 27:24 Field 23:20 WRPs Res0 19:16 Field 15:12 BRPs -Enum 11:8 PMUVer +UnsignedEnum 11:8 PMUVer 0b0000 NI 0b0001 IMP 0b0100 V3P1 @@ -1057,11 +1076,11 @@ Enum 11:8 PMUVer 0b1000 V3P8 0b1111 IMP_DEF EndEnum -Enum 7:4 TraceVer +UnsignedEnum 7:4 TraceVer 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 DebugVer +UnsignedEnum 3:0 DebugVer 0b0110 IMP 0b0111 VHE 0b1000 V8P2 @@ -1091,66 +1110,66 @@ Res0 63:0 EndSysreg Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 -Enum 63:60 RNDR +UnsignedEnum 63:60 RNDR 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 TLB +UnsignedEnum 59:56 TLB 0b0000 NI 0b0001 OS 0b0010 RANGE EndEnum -Enum 55:52 TS +UnsignedEnum 55:52 TS 0b0000 NI 0b0001 FLAGM 0b0010 FLAGM2 EndEnum -Enum 51:48 FHM +UnsignedEnum 51:48 FHM 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 DP +UnsignedEnum 47:44 DP 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 SM4 +UnsignedEnum 43:40 SM4 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 SM3 +UnsignedEnum 39:36 SM3 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 SHA3 +UnsignedEnum 35:32 SHA3 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 RDM +UnsignedEnum 31:28 RDM 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 TME +UnsignedEnum 27:24 TME 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 ATOMIC +UnsignedEnum 23:20 ATOMIC 0b0000 NI 0b0010 IMP EndEnum -Enum 19:16 CRC32 +UnsignedEnum 19:16 CRC32 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SHA2 +UnsignedEnum 15:12 SHA2 0b0000 NI 0b0001 SHA256 0b0010 SHA512 EndEnum -Enum 11:8 SHA1 +UnsignedEnum 11:8 SHA1 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 AES 0b0010 PMULL @@ -1159,63 +1178,63 @@ Res0 3:0 EndSysreg Sysreg ID_AA64ISAR1_EL1 3 0 0 6 1 -Enum 63:60 LS64 +UnsignedEnum 63:60 LS64 0b0000 NI 0b0001 LS64 0b0010 LS64_V 0b0011 LS64_ACCDATA EndEnum -Enum 59:56 XS 
+UnsignedEnum 59:56 XS 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 I8MM +UnsignedEnum 55:52 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 DGH +UnsignedEnum 51:48 DGH 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 BF16 +UnsignedEnum 47:44 BF16 0b0000 NI 0b0001 IMP 0b0010 EBF16 EndEnum -Enum 43:40 SPECRES +UnsignedEnum 43:40 SPECRES 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 SB +UnsignedEnum 39:36 SB 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 FRINTTS +UnsignedEnum 35:32 FRINTTS 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 GPI +UnsignedEnum 31:28 GPI 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 GPA +UnsignedEnum 27:24 GPA 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 LRCPC +UnsignedEnum 23:20 LRCPC 0b0000 NI 0b0001 IMP 0b0010 LRCPC2 EndEnum -Enum 19:16 FCMA +UnsignedEnum 19:16 FCMA 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 JSCVT +UnsignedEnum 15:12 JSCVT 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 API +UnsignedEnum 11:8 API 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1223,7 +1242,7 @@ Enum 11:8 API 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 7:4 APA +UnsignedEnum 7:4 APA 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1231,7 +1250,7 @@ Enum 7:4 APA 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 3:0 DPB +UnsignedEnum 3:0 DPB 0b0000 NI 0b0001 IMP 0b0010 DPB2 @@ -1240,28 +1259,28 @@ EndSysreg Sysreg ID_AA64ISAR2_EL1 3 0 0 6 2 Res0 63:56 -Enum 55:52 CSSC +UnsignedEnum 55:52 CSSC 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 RPRFM +UnsignedEnum 51:48 RPRFM 0b0000 NI 0b0001 IMP EndEnum Res0 47:28 -Enum 27:24 PAC_frac +UnsignedEnum 27:24 PAC_frac 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 BC +UnsignedEnum 23:20 BC 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 MOPS +UnsignedEnum 19:16 MOPS 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 APA3 +UnsignedEnum 15:12 APA3 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1269,32 +1288,32 @@ Enum 15:12 APA3 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 11:8 GPA3 +UnsignedEnum 11:8 GPA3 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 RPRES +UnsignedEnum 7:4 RPRES 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 WFxT +UnsignedEnum 3:0 WFxT 0b0000 NI 0b0010 IMP EndEnum EndSysreg Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0 -Enum 63:60 ECV +UnsignedEnum 63:60 ECV 0b0000 NI 0b0001 IMP 0b0010 CNTPOFF EndEnum -Enum 59:56 FGT +UnsignedEnum 59:56 FGT 0b0000 NI 0b0001 IMP EndEnum Res0 55:48 -Enum 47:44 EXS +UnsignedEnum 47:44 EXS 0b0000 NI 0b0001 IMP EndEnum @@ -1329,15 +1348,15 @@ Enum 23:20 TGRAN16 0b0001 IMP 0b0010 52_BIT EndEnum -Enum 19:16 BIGENDEL0 +UnsignedEnum 19:16 BIGENDEL0 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SNSMEM +UnsignedEnum 15:12 SNSMEM 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 BIGEND +UnsignedEnum 11:8 BIGEND 0b0000 NI 0b0001 IMP EndEnum @@ -1357,62 +1376,62 @@ EndEnum EndSysreg Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1 -Enum 63:60 ECBHB +UnsignedEnum 63:60 ECBHB 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 CMOW +UnsignedEnum 59:56 CMOW 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 TIDCP1 +UnsignedEnum 55:52 TIDCP1 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 nTLBPA +UnsignedEnum 51:48 nTLBPA 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 AFP +UnsignedEnum 47:44 AFP 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 HCX +UnsignedEnum 43:40 HCX 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 ETS +UnsignedEnum 39:36 ETS 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 TWED +UnsignedEnum 35:32 TWED 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 XNX +UnsignedEnum 31:28 XNX 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 SpecSEI +UnsignedEnum 27:24 SpecSEI 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 PAN +UnsignedEnum 23:20 PAN 0b0000 NI 0b0001 IMP 0b0010 PAN2 0b0011 PAN3 EndEnum -Enum 19:16 LO +UnsignedEnum 19:16 LO 0b0000 
NI 0b0001 IMP EndEnum -Enum 15:12 HPDS +UnsignedEnum 15:12 HPDS 0b0000 NI 0b0001 IMP 0b0010 HPDS2 EndEnum -Enum 11:8 VH +UnsignedEnum 11:8 VH 0b0000 NI 0b0001 IMP EndEnum @@ -1420,7 +1439,7 @@ Enum 7:4 VMIDBits 0b0000 8 0b0010 16 EndEnum -Enum 3:0 HAFDBS +UnsignedEnum 3:0 HAFDBS 0b0000 NI 0b0001 AF 0b0010 DBM @@ -1428,26 +1447,26 @@ EndEnum EndSysreg Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2 -Enum 63:60 E0PD +UnsignedEnum 63:60 E0PD 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 EVT +UnsignedEnum 59:56 EVT 0b0000 NI 0b0001 IMP 0b0010 TTLBxS EndEnum -Enum 55:52 BBM +UnsignedEnum 55:52 BBM 0b0000 0 0b0001 1 0b0010 2 EndEnum -Enum 51:48 TTL +UnsignedEnum 51:48 TTL 0b0000 NI 0b0001 IMP EndEnum Res0 47:44 -Enum 43:40 FWB +UnsignedEnum 43:40 FWB 0b0000 NI 0b0001 IMP EndEnum @@ -1455,7 +1474,7 @@ Enum 39:36 IDS 0b0000 0x0 0b0001 0x18 EndEnum -Enum 35:32 AT +UnsignedEnum 35:32 AT 0b0000 NI 0b0001 IMP EndEnum @@ -1463,7 +1482,7 @@ Enum 31:28 ST 0b0000 39 0b0001 48_47 EndEnum -Enum 27:24 NV +UnsignedEnum 27:24 NV 0b0000 NI 0b0001 IMP 0b0010 NV2 @@ -1476,19 +1495,19 @@ Enum 19:16 VARange 0b0000 48 0b0001 52 EndEnum -Enum 15:12 IESB +UnsignedEnum 15:12 IESB 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 LSM +UnsignedEnum 11:8 LSM 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 UAO +UnsignedEnum 7:4 UAO 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 CnP +UnsignedEnum 3:0 CnP 0b0000 NI 0b0001 IMP EndEnum @@ -1599,7 +1618,8 @@ EndSysreg SysregFields SMCR_ELx Res0 63:32 Field 31 FA64 -Res0 30:9 +Field 30 EZT0 +Res0 29:9 Raz 8:4 Field 3:0 LEN EndSysregFields @@ -1618,6 +1638,130 @@ Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL1 3 0 9 9 0 +Res0 63:8 +Field 7:6 PCT +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E1SPE +Field 0 E0SPE +EndSysreg + +Sysreg PMSNEVFR_EL1 3 0 9 9 1 +Field 63:0 E +EndSysreg + +Sysreg PMSICR_EL1 3 0 9 9 2 +Field 63:56 ECOUNT +Res0 55:32 +Field 31:0 COUNT +EndSysreg + +Sysreg PMSIRR_EL1 3 0 9 9 3 +Res0 63:32 +Field 31:8 INTERVAL +Res0 7:1 +Field 0 RND +EndSysreg + +Sysreg PMSFCR_EL1 3 0 9 9 4 +Res0 63:19 +Field 18 ST +Field 17 LD +Field 16 B +Res0 15:4 +Field 3 FnE +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMSEVFR_EL1 3 0 9 9 5 +Field 63:0 E +EndSysreg + +Sysreg PMSLATFR_EL1 3 0 9 9 6 +Res0 63:16 +Field 15:0 MINLAT +EndSysreg + +Sysreg PMSIDR_EL1 3 0 9 9 7 +Res0 63:25 +Field 24 PBT +Field 23:20 FORMAT +Enum 19:16 COUNTSIZE + 0b0010 12_BIT_SAT + 0b0011 16_BIT_SAT +EndEnum +Field 15:12 MAXSIZE +Enum 11:8 INTERVAL + 0b0000 256 + 0b0010 512 + 0b0011 768 + 0b0100 1024 + 0b0101 1536 + 0b0110 2048 + 0b0111 3072 + 0b1000 4096 +EndEnum +Res0 7 +Field 6 FnE +Field 5 ERND +Field 4 LDS +Field 3 ARCHINST +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMBLIMITR_EL1 3 0 9 10 0 +Field 63:12 LIMIT +Res0 11:6 +Field 5 PMFZ +Res0 4:3 +Enum 2:1 FM + 0b00 FILL + 0b10 DISCARD +EndEnum +Field 0 E +EndSysreg + +Sysreg PMBPTR_EL1 3 0 9 10 1 +Field 63:0 PTR +EndSysreg + +Sysreg PMBSR_EL1 3 0 9 10 3 +Res0 63:32 +Enum 31:26 EC + 0b000000 BUF + 0b100100 FAULT_S1 + 0b100101 FAULT_S2 + 0b011110 FAULT_GPC + 0b011111 IMP_DEF +EndEnum +Res0 25:20 +Field 19 DL +Field 18 EA +Field 17 S +Field 16 COLL +Field 15:0 MSS +EndSysreg + +Sysreg PMBIDR_EL1 3 0 9 10 7 +Res0 63:12 +Enum 11:8 EA + 0b0000 NotDescribed + 0b0001 Ignored + 0b0010 SError +EndEnum +Res0 7:6 +Field 5 F +Field 4 P +Field 3:0 ALIGN +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID @@ -1772,6 +1916,21 @@ Sysreg FAR_EL2 3 4 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL2 3 4 9 9 0 +Res0 63:8 +Enum 7:6 PCT + 0b00 VIRT + 
0b01 PHYS + 0b11 GUEST +EndEnum +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E2SPE +Field 0 E0HSPE +EndSysreg + Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg @@ -1842,3 +2001,18 @@ Field 23:16 LD Res0 15:8 Field 7:0 LR EndSysreg + +Sysreg ISR_EL1 3 0 12 1 0 +Res0 63:11 +Field 10 IS +Field 9 FS +Field 8 A +Field 7 I +Field 6 F +Res0 5:0 +EndSysreg + +Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 +Res0 63:24 +Field 23:0 INTID +EndSysreg
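For reference, the net effect of the gen-sysreg.awk change earlier in this series: fields converted to UnsignedEnum/SignedEnum now also get a _SIGNED define next to the usual field macros. For ID_AA64PFR1_EL1.SME (bits 27:24) the generated header would contain something like the following; the exact output follows the existing _SHIFT/_MASK/_WIDTH convention and is shown here for illustration only:

	#define ID_AA64PFR1_EL1_SME_SHIFT	24
	#define ID_AA64PFR1_EL1_SME_MASK	GENMASK(27, 24)
	#define ID_AA64PFR1_EL1_SME_WIDTH	4
	#define ID_AA64PFR1_EL1_SME_SIGNED	false		/* new: from UnsignedEnum */
	#define ID_AA64PFR1_EL1_SME_NI		UL(0b0000)
	#define ID_AA64PFR1_EL1_SME_IMP		UL(0b0001)
	#define ID_AA64PFR1_EL1_SME_SME2	UL(0b0010)	/* new SME2 value */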