From 81ff692ad924da2233381bedff90c5c1f5c31368 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 29 Aug 2022 16:48:15 +0100 Subject: arm64/sysreg: Add hwcap for SVE EBF16 SVE has a separate identification register indicating support for BFloat16 operations. Add a hwcap identifying support for EBF16 in this register, mirroring what we did for the non-SVE case. While there is currently an architectural requirement for BF16 support to be the same in SVE and non-SVE contexts, there are separate identification registers, and this separate hwcap helps avoid issues if that requirement were to be relaxed in the future; we have already chosen to have a separate capability for base BF16 support. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220829154815.832347-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/include/uapi/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 1 + arch/arm64/kernel/cpuinfo.c | 1 + 4 files changed, 4 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index cef4ae7a3d8b..298b386d3ebe 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -119,6 +119,7 @@ #define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64) #define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT) #define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16) +#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 1ad2568a2569..9b245da6f507 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -92,5 +92,6 @@ #define HWCAP2_SME_FA64 (1 << 30) #define HWCAP2_WFXT (1UL << 31) #define HWCAP2_EBF16 (1UL << 32) +#define HWCAP2_SVE_EBF16 (1UL << 33) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af4de817d712..2a59d6a875bc 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2733,6 +2733,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index d7702f39b4d3..28d4f442b0bc 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -115,6 +115,7 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_SME_FA64] = "smefa64", [KERNEL_HWCAP_WFXT] = "wfxt", [KERNEL_HWCAP_EBF16] = "ebf16", + [KERNEL_HWCAP_SVE_EBF16] = "sveebf16", }; #ifdef CONFIG_COMPAT -- cgit v1.2.3
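As a brief aside between patches: the new bit is consumed from userspace through the auxiliary vector, like any other hwcap. A minimal sketch (getauxval() and AT_HWCAP2 come from <sys/auxv.h>; the fallback define is only needed when building against uapi headers that predate this patch):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SVE_EBF16
#define HWCAP2_SVE_EBF16	(1UL << 33)	/* value from the uapi hunk above */
#endif

int main(void)
{
	/* AT_HWCAP2 carries the second hwcap word on arm64 */
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("SVE EBF16: %s\n",
	       (hwcap2 & HWCAP2_SVE_EBF16) ? "supported" : "not supported");
	return 0;
}

The same capability shows up as the "sveebf16" token in /proc/cpuinfo, per the cpuinfo.c hunk above.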
"sveebf16", }; #ifdef CONFIG_COMPAT -- cgit v1.2.3 From a04054362e4a9f48a51a742215c2ae1b8bfd04b5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:53:58 +0100 Subject: arm64/sysreg: Remove stray SMIDR_EL1 defines SMIDR_EL1 was converted to automatic generation but some of the constants for fields in it were mistakenly left, remove them. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 818df938a7ad..62376ef4a4f1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -445,10 +445,6 @@ #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) -#define SMIDR_EL1_IMPLEMENTER_SHIFT 24 -#define SMIDR_EL1_SMPS_SHIFT 15 -#define SMIDR_EL1_AFFINITY_SHIFT 0 - #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) -- cgit v1.2.3 From 4c8b18af25be2e87b7b62bec397de2cf160555fa Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:53:59 +0100 Subject: arm64/sysreg: Describe ID_AA64SMFR0_EL1.SMEVer as an enumeration As with the corresponding SVE field ID_AA64ZFR0_EL1.SVEVer and other ID register fields the SMEVer field should be identified as an enumeration but it is currently described as a plain field (most likely due to there presently being only one possible value). Update it to be an enumeration as one would expect. No functional change. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 9ae483ec1e56..185bc5b0faf7 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -98,7 +98,9 @@ Enum 63 FA64 0b1 IMP EndEnum Res0 62:60 -Field 59:56 SMEver +Enum 59:56 SMEver + 0b0000 IMP +EndEnum Enum 55:52 I16I64 0b0000 NI 0b1111 IMP -- cgit v1.2.3 From d9b230f644196b2986501ecc45c2b2a41075040d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 5 Sep 2022 23:54:00 +0100 Subject: arm64: cache: Remove unused CTR_CACHE_MINLINE_MASK A recent change renamed CTR_DMINLINE_SHIFT to CTR_EL0_DminLine_SHIFT but didn't fully update CTR_CACHE_MINLINE_MASK. As CTR_CACHE_MINLINE_MASK is not used anywhere anyway, just remove it. 
Signed-off-by: Kristina Martsenko Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cache.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 34256bda0da9..c0b178d1bb4f 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void) #define arch_slab_minalign() arch_slab_minalign() #endif -#define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_EL0_DMINLINE_SHIFT | \ - CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT) - #define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr) #define ICACHEF_ALIASING 0 -- cgit v1.2.3 From 2d987e64e8c756ffcbace4a598444297df28b8a1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:01 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64MMFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64MMFR0_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 4 +- arch/arm64/include/asm/cpufeature.h | 38 ++++----- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/kvm_pgtable.h | 6 +- arch/arm64/include/asm/sysreg.h | 106 ++++++++++++------------ arch/arm64/kernel/cpufeature.c | 34 ++++---- arch/arm64/kernel/head.S | 6 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 12 +-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/hyp/pgtable.c | 2 +- arch/arm64/kvm/reset.c | 12 +-- arch/arm64/mm/context.c | 6 +- arch/arm64/mm/init.c | 2 +- drivers/firmware/efi/libstub/arm64-stub.c | 4 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 6 +- 15 files changed, 121 insertions(+), 121 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5846145be523..a6e7061d84e7 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -384,8 +384,8 @@ alternative_cb_end .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1 mrs \tmp0, ID_AA64MMFR0_EL1 // Narrow PARange to fit the PS field in TCR_ELx - ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3 - mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX + ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3 + mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX cmp \tmp0, \tmp1 csel \tmp0, \tmp1, \tmp0, hi bfi \tcr, \tmp0, \pos, #3 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index fd7d75a275f6..96ccf823f46e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -597,8 +597,8 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) { - return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 || - cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1; + return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT) == 0x1 || + cpuid_feature_extract_unsigned_field(mmfr0, 
ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1; } static inline bool id_aa64pfr0_32bit_el1(u64 pfr0) @@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_TGRAN4_SHIFT); + ID_AA64MMFR0_EL1_TGRAN4_SHIFT); - return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) && - (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX); + return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX); } static inline bool system_supports_64kb_granule(void) @@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_TGRAN64_SHIFT); + ID_AA64MMFR0_EL1_TGRAN64_SHIFT); - return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) && - (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX); + return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX); } static inline bool system_supports_16kb_granule(void) @@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_TGRAN16_SHIFT); + ID_AA64MMFR0_EL1_TGRAN16_SHIFT); - return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) && - (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX); + return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX); } static inline bool system_supports_mixed_endian_el0(void) @@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_BIGENDEL_SHIFT); + ID_AA64MMFR0_EL1_BIGENDEL_SHIFT); return val == 0x1; } @@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) { switch (parange) { - case ID_AA64MMFR0_PARANGE_32: return 32; - case ID_AA64MMFR0_PARANGE_36: return 36; - case ID_AA64MMFR0_PARANGE_40: return 40; - case ID_AA64MMFR0_PARANGE_42: return 42; - case ID_AA64MMFR0_PARANGE_44: return 44; - case ID_AA64MMFR0_PARANGE_48: return 48; - case ID_AA64MMFR0_PARANGE_52: return 52; + case ID_AA64MMFR0_EL1_PARANGE_32: return 32; + case ID_AA64MMFR0_EL1_PARANGE_36: return 36; + case ID_AA64MMFR0_EL1_PARANGE_40: return 40; + case ID_AA64MMFR0_EL1_PARANGE_42: return 42; + case ID_AA64MMFR0_EL1_PARANGE_44: return 44; + case ID_AA64MMFR0_EL1_PARANGE_48: return 48; + case ID_AA64MMFR0_EL1_PARANGE_52: return 52; /* * A future PE could use a value unknown to the kernel. 
* However, by the "D10.1.4 Principles of the ID scheme diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 2630faa5bc08..faad9e01e52b 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -132,7 +132,7 @@ /* Disable any fine grained traps */ .macro __init_el2_fgt mrs x1, id_aa64mmfr0_el1 - ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4 + ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4 cbz x1, .Lskip_fgt_\@ mov x0, xzr diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 9f339dffbc1a..1b098bd4cd37 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -16,9 +16,9 @@ static inline u64 kvm_get_parange(u64 mmfr0) { u64 parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX) + parange = ID_AA64MMFR0_EL1_PARANGE_MAX; return parange; } diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 62376ef4a4f1..f9af77ab5f98 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -733,53 +733,53 @@ #define ID_AA64PFR1_MTE_ASYMM 0x3 /* id_aa64mmfr0 */ -#define ID_AA64MMFR0_ECV_SHIFT 60 -#define ID_AA64MMFR0_FGT_SHIFT 56 -#define ID_AA64MMFR0_EXS_SHIFT 44 -#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 -#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 -#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 -#define ID_AA64MMFR0_TGRAN4_SHIFT 28 -#define ID_AA64MMFR0_TGRAN64_SHIFT 24 -#define ID_AA64MMFR0_TGRAN16_SHIFT 20 -#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 -#define ID_AA64MMFR0_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 -#define ID_AA64MMFR0_ASID_SHIFT 4 -#define ID_AA64MMFR0_PARANGE_SHIFT 0 - -#define ID_AA64MMFR0_ASID_8 0x0 -#define ID_AA64MMFR0_ASID_16 0x2 - -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf - -#define ID_AA64MMFR0_PARANGE_32 0x0 -#define ID_AA64MMFR0_PARANGE_36 0x1 -#define ID_AA64MMFR0_PARANGE_40 0x2 -#define ID_AA64MMFR0_PARANGE_42 0x3 -#define ID_AA64MMFR0_PARANGE_44 0x4 -#define ID_AA64MMFR0_PARANGE_48 0x5 -#define ID_AA64MMFR0_PARANGE_52 0x6 +#define ID_AA64MMFR0_EL1_ECV_SHIFT 60 +#define ID_AA64MMFR0_EL1_FGT_SHIFT 56 +#define ID_AA64MMFR0_EL1_EXS_SHIFT 44 +#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT 40 +#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT 36 +#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT 32 +#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT 28 +#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT 24 +#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20 +#define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 +#define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 +#define ID_AA64MMFR0_EL1_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_EL1_ASID_SHIFT 4 +#define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 + +#define ID_AA64MMFR0_EL1_ASID_8 0x0 +#define ID_AA64MMFR0_EL1_ASID_16 0x2 + +#define ID_AA64MMFR0_EL1_TGRAN4_NI 0xf +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN64_NI 0xf +#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 +#define 
ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf + +#define ID_AA64MMFR0_EL1_PARANGE_32 0x0 +#define ID_AA64MMFR0_EL1_PARANGE_36 0x1 +#define ID_AA64MMFR0_EL1_PARANGE_40 0x2 +#define ID_AA64MMFR0_EL1_PARANGE_42 0x3 +#define ID_AA64MMFR0_EL1_PARANGE_44 0x4 +#define ID_AA64MMFR0_EL1_PARANGE_48 0x5 +#define ID_AA64MMFR0_EL1_PARANGE_52 0x6 #define ARM64_MIN_PARANGE_BITS 32 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 #ifdef CONFIG_ARM64_PA_BITS_52 -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52 #else -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif /* id_aa64mmfr1 */ @@ -951,20 +951,20 @@ #define ID_PFR1_PROGMOD_SHIFT 0 #if defined(CONFIG_ARM64_4K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT #elif defined(CONFIG_ARM64_64K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT #endif #define MVFR2_FPMISC_SHIFT 4 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af4de817d712..3f4512267fe4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -316,9 +316,9 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - 
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0), /* * Page size not being supported at Stage-2 is not fatal. You * just give up KVM if PAGE_SIZE isn't supported there. Go fix @@ -334,9 +334,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { * fields are inconsistent across vCPUs, then it isn't worth * trying to bring KVM up. */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1), /* * We already refuse to boot CPUs that don't support our configured * page size, so we can only detect mismatches for a page size other @@ -344,20 +344,20 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { * exist in the wild so, even though we don't like it, we'll have to go * along with it and treat them as non-strict. */ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 
ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2104,7 +2104,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR0_EL1, - .field_pos = ID_AA64MMFR0_ECV_SHIFT, + .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2751,7 +2751,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ - HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), @@ -3102,7 +3102,7 @@ static void verify_hyp_capabilities(void) /* Verify IPA range */ parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); + ID_AA64MMFR0_EL1_PARANGE_SHIFT); ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange); if (ipa_max < get_kvm_ipa_limit()) { pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index cefe6a73ee54..bffb034d8f73 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow) */ SYM_FUNC_START(__enable_mmu) mrs x3, ID_AA64MMFR0_EL1 - ubfx x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4 - cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN + ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4 + cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN b.lt __no_granule_support - cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX + cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX b.gt __no_granule_support phys_to_ttbr x2, x2 msr ttbr0_el1, x2 // load TTBR0 diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index fa6e466ed57f..aac538c34f87 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -74,10 +74,10 @@ * - Non-context synchronizing exception entry and exit */ #define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ ) /* @@ -86,8 +86,8 @@ * - 16-bit ASID */ #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASID), ID_AA64MMFR0_EL1_ASID_16) \ ) /* diff --git 
a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 99c8d8b73e70..823eb4d03956 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) u64 mdcr_set = 0; /* Trap Debug Communications Channel registers */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids)) mdcr_set |= MDCR_EL2_TDCC; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 2cb3867eb7c2..cdf8e76b0be1 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data { static bool kvm_phys_is_valid(u64 phys) { - return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX)); + return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX)); } static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 0e08fbe68715..5ae18472205a 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); + ID_AA64MMFR0_EL1_PARANGE_SHIFT); /* * IPA size beyond 48 bits could not be supported * on either 4K or 16K page size. Hence let's cap @@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void) * on the system. */ if (PAGE_SIZE != SZ_64K) - parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48); + parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48); /* * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at * Stage-2. If not, things will stop very quickly. */ - switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) { - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE: + switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) { + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE: kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n"); return -EINVAL; - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT: + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT: kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n"); break; - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX: + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... 
ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX: kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n"); break; default: diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index b8b4cf0bcf39..8f38a5452d05 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void) { u32 asid; int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), - ID_AA64MMFR0_ASID_SHIFT); + ID_AA64MMFR0_EL1_ASID_SHIFT); switch (fld) { default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); fallthrough; - case ID_AA64MMFR0_ASID_8: + case ID_AA64MMFR0_EL1_ASID_8: asid = 8; break; - case ID_AA64MMFR0_ASID_16: + case ID_AA64MMFR0_EL1_ASID_16: asid = 16; } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index b9af30be813e..4b4651ee47f2 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -360,7 +360,7 @@ void __init arm64_memblock_init(void) extern u16 memstart_offset_seed; u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); int parange = cpuid_feature_extract_unsigned_field( - mmfr0, ID_AA64MMFR0_PARANGE_SHIFT); + mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); s64 range = linear_region_size - BIT(id_aa64mmfr0_parange_to_phys_shift(parange)); diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 577173ee1f83..60973e84d7ab 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -23,8 +23,8 @@ efi_status_t check_platform_features(void) if (IS_ENABLED(CONFIG_ARM64_4K_PAGES)) return EFI_SUCCESS; - tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { + tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf; + if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 1ef7bbb4acf3..da67a75cdaad 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) } reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT); + par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT); tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par); cd->ttbr = virt_to_phys(mm->pgd); @@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) * addresses larger than what we support. */ reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT); oas = id_aa64mmfr0_parange_to_phys_shift(fld); if (smmu->oas < oas) return false; /* We can support bigger ASIDs than the CPU, but not smaller */ - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASID_SHIFT); asid_bits = fld ? 
16 : 8; if (smmu->asid_bits < asid_bits) return false; -- cgit v1.2.3 From a957c6be2b88564ed413a03a6009f11b1e5d5806 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:02 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64MMFR2_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64MMFR2_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/sysreg.h | 30 +++++++++--------- arch/arm64/kernel/cpufeature.c | 42 +++++++++++++------------- arch/arm64/kernel/head.S | 4 +-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 16 +++++----- 5 files changed, 47 insertions(+), 47 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index a6e7061d84e7..0d5ced93c740 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -612,7 +612,7 @@ alternative_endif .macro offset_ttbr1, ttbr, tmp #ifdef CONFIG_ARM64_VA_BITS_52 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 - and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) cbnz \tmp, .Lskipoffs_\@ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET .Lskipoffs_\@ : diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f9af77ab5f98..bb1f9ae5705f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -805,21 +805,21 @@ #define ID_AA64MMFR1_TIDCP1_IMP 1 /* id_aa64mmfr2 */ -#define ID_AA64MMFR2_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EVT_SHIFT 56 -#define ID_AA64MMFR2_BBM_SHIFT 52 -#define ID_AA64MMFR2_TTL_SHIFT 48 -#define ID_AA64MMFR2_FWB_SHIFT 40 -#define ID_AA64MMFR2_IDS_SHIFT 36 -#define ID_AA64MMFR2_AT_SHIFT 32 -#define ID_AA64MMFR2_ST_SHIFT 28 -#define ID_AA64MMFR2_NV_SHIFT 24 -#define ID_AA64MMFR2_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_LVA_SHIFT 16 -#define ID_AA64MMFR2_IESB_SHIFT 12 -#define ID_AA64MMFR2_LSM_SHIFT 8 -#define ID_AA64MMFR2_UAO_SHIFT 4 -#define ID_AA64MMFR2_CNP_SHIFT 0 +#define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 +#define ID_AA64MMFR2_EL1_EVT_SHIFT 56 +#define ID_AA64MMFR2_EL1_BBM_SHIFT 52 +#define ID_AA64MMFR2_EL1_TTL_SHIFT 48 +#define ID_AA64MMFR2_EL1_FWB_SHIFT 40 +#define ID_AA64MMFR2_EL1_IDS_SHIFT 36 +#define ID_AA64MMFR2_EL1_AT_SHIFT 32 +#define ID_AA64MMFR2_EL1_ST_SHIFT 28 +#define ID_AA64MMFR2_EL1_NV_SHIFT 24 +#define ID_AA64MMFR2_EL1_CCIDX_SHIFT 20 +#define ID_AA64MMFR2_EL1_LVA_SHIFT 16 +#define ID_AA64MMFR2_EL1_IESB_SHIFT 12 +#define ID_AA64MMFR2_EL1_LSM_SHIFT 8 +#define ID_AA64MMFR2_EL1_UAO_SHIFT 4 +#define ID_AA64MMFR2_EL1_CNP_SHIFT 0 /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3f4512267fe4..eb50d52dac1f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -378,21 +378,21 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0), - 
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LVA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CNP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1571,7 +1571,7 @@ bool kaslr_requires_kpti(void) if (IS_ENABLED(CONFIG_ARM64_E0PD)) { u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); if (cpuid_feature_extract_unsigned_field(mmfr2, - ID_AA64MMFR2_E0PD_SHIFT)) + ID_AA64MMFR2_EL1_E0PD_SHIFT)) return false; } @@ -2303,7 +2303,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_STAGE2_FWB, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_FWB_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT, .field_width = 4, .min_field_value = 1, .matches = has_cpuid_feature, @@ -2314,7 +2314,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_ARMv8_4_TTL, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_TTL_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT, .field_width = 4, .min_field_value = 1, .matches = has_cpuid_feature, @@ -2380,7 +2380,7 @@ static const struct arm64_cpu_capabilities 
arm64_features[] = { .matches = has_useable_cnp, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_CNP_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_CNP_SHIFT, .field_width = 4, .min_field_value = 1, .cpu_enable = cpu_enable_cnp, @@ -2499,7 +2499,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, .field_width = 4, - .field_pos = ID_AA64MMFR2_E0PD_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT, .matches = has_cpuid_feature, .min_field_value = 1, .cpu_enable = cpu_enable_e0pd, @@ -2725,7 +2725,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bffb034d8f73..d040f57d3496 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry) */ #if VA_BITS > 48 mrs_s x0, SYS_ID_AA64MMFR2_EL1 - tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT + tst x0, #0xf << ID_AA64MMFR2_EL1_LVA_SHIFT mov x0, #VA_BITS mov x25, #VA_BITS_MIN csel x25, x25, x0, eq @@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva) b.ne 2f mrs_s x0, SYS_ID_AA64MMFR2_EL1 - and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + and x0, x0, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) cbnz x0, 2f update_early_cpu_boot_status \ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index aac538c34f87..3dad7b2079ee 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -120,14 +120,14 @@ * - E0PDx mechanism */ #define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \ ) /* -- cgit v1.2.3 From 55adc08d7e6433357f2b3b4fee248ae9da1fe2fa Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:03 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64PFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for 
ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64PFR0_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 12 ++--- arch/arm64/include/asm/el2_setup.h | 4 +- arch/arm64/include/asm/sysreg.h | 52 +++++++++---------- arch/arm64/kernel/cpufeature.c | 70 +++++++++++++------------- arch/arm64/kernel/hyp-stub.S | 2 +- arch/arm64/kernel/idreg-override.c | 2 +- arch/arm64/kernel/proton-pack.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 16 +++--- arch/arm64/kvm/hyp/nvhe/pkvm.c | 20 ++++---- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 8 +-- arch/arm64/kvm/sys_regs.c | 26 +++++----- drivers/irqchip/irq-gic-v4.c | 2 +- 13 files changed, 109 insertions(+), 109 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0d5ced93c740..48c7963abaf3 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -524,7 +524,7 @@ alternative_endif */ .macro reset_amuserenr_el0, tmpreg mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1 - ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4 + ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 cbz \tmpreg, .Lskip_\@ // Skip if no AMU present msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0 .Lskip_\@: diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 96ccf823f46e..8ba9f1c07432 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -603,21 +603,21 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) static inline bool id_aa64pfr0_32bit_el1(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT); - return val == ID_AA64PFR0_ELx_32BIT_64BIT; + return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT; } static inline bool id_aa64pfr0_32bit_el0(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT); - return val == ID_AA64PFR0_ELx_32BIT_64BIT; + return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT; } static inline bool id_aa64pfr0_sve(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT); return val > 0; } @@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope) pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); csv2_val = cpuid_feature_extract_unsigned_field(pfr0, - ID_AA64PFR0_CSV2_SHIFT); + ID_AA64PFR0_EL1_CSV2_SHIFT); return csv2_val == 3; } diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index faad9e01e52b..a011c87ec6e3 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -97,7 +97,7 @@ /* GICv3 system register access */ .macro __init_el2_gicv3 mrs x0, id_aa64pfr0_el1 - ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 + ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4 cbz x0, .Lskip_gicv3_\@ mrs_s x0, SYS_ICC_SRE_EL2 @@ -162,7 +162,7 @@ msr_s SYS_HFGITR_EL2, xzr mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU - ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, 
#4 + ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 cbz x1, .Lskip_fgt_\@ msr_s SYS_HAFGRTR_EL2, xzr diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index bb1f9ae5705f..06f93aa9abb1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -686,32 +686,32 @@ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) /* id_aa64pfr0 */ -#define ID_AA64PFR0_CSV3_SHIFT 60 -#define ID_AA64PFR0_CSV2_SHIFT 56 -#define ID_AA64PFR0_DIT_SHIFT 48 -#define ID_AA64PFR0_AMU_SHIFT 44 -#define ID_AA64PFR0_MPAM_SHIFT 40 -#define ID_AA64PFR0_SEL2_SHIFT 36 -#define ID_AA64PFR0_SVE_SHIFT 32 -#define ID_AA64PFR0_RAS_SHIFT 28 -#define ID_AA64PFR0_GIC_SHIFT 24 -#define ID_AA64PFR0_ASIMD_SHIFT 20 -#define ID_AA64PFR0_FP_SHIFT 16 -#define ID_AA64PFR0_EL3_SHIFT 12 -#define ID_AA64PFR0_EL2_SHIFT 8 -#define ID_AA64PFR0_EL1_SHIFT 4 -#define ID_AA64PFR0_EL0_SHIFT 0 - -#define ID_AA64PFR0_AMU 0x1 -#define ID_AA64PFR0_SVE 0x1 -#define ID_AA64PFR0_RAS_V1 0x1 -#define ID_AA64PFR0_RAS_V1P1 0x2 -#define ID_AA64PFR0_FP_NI 0xf -#define ID_AA64PFR0_FP_SUPPORTED 0x0 -#define ID_AA64PFR0_ASIMD_NI 0xf -#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 -#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 +#define ID_AA64PFR0_EL1_CSV3_SHIFT 60 +#define ID_AA64PFR0_EL1_CSV2_SHIFT 56 +#define ID_AA64PFR0_EL1_DIT_SHIFT 48 +#define ID_AA64PFR0_EL1_AMU_SHIFT 44 +#define ID_AA64PFR0_EL1_MPAM_SHIFT 40 +#define ID_AA64PFR0_EL1_SEL2_SHIFT 36 +#define ID_AA64PFR0_EL1_SVE_SHIFT 32 +#define ID_AA64PFR0_EL1_RAS_SHIFT 28 +#define ID_AA64PFR0_EL1_GIC_SHIFT 24 +#define ID_AA64PFR0_EL1_ASIMD_SHIFT 20 +#define ID_AA64PFR0_EL1_FP_SHIFT 16 +#define ID_AA64PFR0_EL1_EL3_SHIFT 12 +#define ID_AA64PFR0_EL1_EL2_SHIFT 8 +#define ID_AA64PFR0_EL1_EL1_SHIFT 4 +#define ID_AA64PFR0_EL1_EL0_SHIFT 0 + +#define ID_AA64PFR0_EL1_AMU 0x1 +#define ID_AA64PFR0_EL1_SVE 0x1 +#define ID_AA64PFR0_EL1_RAS_V1 0x1 +#define ID_AA64PFR0_EL1_RAS_V1P1 0x2 +#define ID_AA64PFR0_EL1_FP_NI 0xf +#define ID_AA64PFR0_EL1_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_ASIMD_NI 0xf +#define ID_AA64PFR0_EL1_ASIMD_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64pfr1 */ #define ID_AA64PFR1_SME_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index eb50d52dac1f..3bda767af32d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -243,22 +243,22 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, 
FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, ID_AA64PFR0_EL1_ASIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), ARM64_FTR_END, }; @@ -1492,7 +1492,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); return cpuid_feature_extract_signed_field(pfr0, - ID_AA64PFR0_FP_SHIFT) < 0; + ID_AA64PFR0_EL1_FP_SHIFT) < 0; } static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, @@ -2093,7 +2093,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_GIC_SHIFT, + .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2168,9 +2168,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_32bit_el0, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_EL0_SHIFT, + .field_pos = ID_AA64PFR0_EL1_EL0_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT, + .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, }, #ifdef CONFIG_KVM { @@ -2180,9 +2180,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_EL1_SHIFT, + .field_pos = ID_AA64PFR0_EL1_EL1_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT, + .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, }, { .desc = "Protected KVM", @@ -2201,7 +2201,7 @@ static const struct arm64_cpu_capabilities arm64_features[] 
= { * more details. */ .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_CSV3_SHIFT, + .field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT, .field_width = 4, .min_field_value = 1, .matches = unmap_kernel_at_el0, @@ -2244,9 +2244,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_SVE, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_SVE_SHIFT, + .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_SVE, + .min_field_value = ID_AA64PFR0_EL1_SVE, .matches = has_cpuid_feature, .cpu_enable = sve_kernel_enable, }, @@ -2259,9 +2259,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_RAS_SHIFT, + .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_RAS_V1, + .min_field_value = ID_AA64PFR0_EL1_RAS_V1, .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ @@ -2278,9 +2278,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_amu, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_AMU_SHIFT, + .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_AMU, + .min_field_value = ID_AA64PFR0_EL1_AMU, .cpu_enable = cpu_amu_enable, }, #endif /* CONFIG_ARM64_AMU_EXTN */ @@ -2485,7 +2485,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_GIC_SHIFT, + .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2708,11 +2708,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, 
ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), @@ -2727,7 +2727,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 12c7fad02ae5..f0644e945117 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync) SYM_CODE_END(elx_sync) SYM_CODE_START_LOCAL(__finalise_el2) - check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve + check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve .Linit_sve: /* SVE register access */ mrs x0, cptr_el2 // Disable SVE traps diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 1b0542c69738..7b90a9b4cc0a 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = { .name = "id_aa64pfr0", .override = &id_aa64pfr0_override, .fields = { - FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter), + FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter), {} }, }; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 40be3a7c2c53..6ee586b4e235 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void) /* If the CPU has CSV2 set, we're safe */ pfr0 = read_cpuid(ID_AA64PFR0_EL1); - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT)) return SPECTRE_UNAFFECTED; /* Alternatively, we have a list of unaffected CPUs */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 3dad7b2079ee..d94fb45a0e34 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -35,9 +35,9 @@ * - Data Independent Timing */ #define PVM_ID_AA64PFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \ ) /* @@ -49,11 +49,11 @@ * Supported by KVM */ #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - 
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_V1) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 823eb4d03956..d1fa03e2a449 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Protected KVM does not support AArch32 guests. */ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY); /* * Linux guests assume support for floating-point and Advanced SIMD. Do * not change the trapping behavior for these from the KVM default. */ - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), PVM_ID_AA64PFR0_ALLOW)); - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD), PVM_ID_AA64PFR0_ALLOW)); /* Trap RAS unless all current versions are supported */ - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) < - ID_AA64PFR0_RAS_V1P1) { + if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < + ID_AA64PFR0_EL1_RAS_V1P1) { hcr_set |= HCR_TERR | HCR_TEA; hcr_clear |= HCR_FIEN; } /* Trap AMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { hcr_clear |= HCR_AMVOFFEN; cptr_set |= CPTR_EL2_TAM; } /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) cptr_set |= CPTR_EL2_TZ; vcpu->arch.hcr_el2 |= hcr_set; diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index e20fa4475dac..2ebf93336437 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); /* Spectre and Meltdown mitigation in KVM */ - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)kvm->arch.pfr0_csv2); - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)kvm->arch.pfr0_csv3); return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; @@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct 
kvm_vcpu *vcpu, * No support for AArch32 guests, therefore, pKVM has no sanitized copy * of AArch32 feature id registers. */ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY); return pvm_access_raz_wi(vcpu, p, r); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3234f50b8c4b..cf1fc616e00d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1077,15 +1077,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, switch (id) { case SYS_ID_AA64PFR0_EL1: if (!vcpu_has_sve(vcpu)) - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); if (kvm_vgic_global_state.type == VGIC_V3) { - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1); } break; case SYS_ID_AA64PFR1_EL1: @@ -1196,21 +1196,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, * it doesn't promise more than what is actually provided (the * guest could otherwise be covered in ectoplasmic residue). 
*/ - csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT); + csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT); if (csv2 > 1 || (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED)) return -EINVAL; /* Same thing for CSV3 */ - csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT); + csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT); if (csv3 > 1 || (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED)) return -EINVAL; /* We can only differ with CSV[23], and anything else is an error */ val ^= read_id_reg(vcpu, rd, false); - val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) | - (0xFUL << ID_AA64PFR0_CSV3_SHIFT)); + val &= ~((0xFUL << ID_AA64PFR0_EL1_CSV2_SHIFT) | + (0xFUL << ID_AA64PFR0_EL1_CSV3_SHIFT)); if (val) return -EINVAL; @@ -1825,7 +1825,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, } else { u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); + u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 4ea71b28f9f5..a6277dea4c7a 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void) { unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT); return fld >= 0x3; } -- cgit v1.2.3 From 6ca2b9ca459a598b78265477d288fdec8a0fdd6d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:04 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64PFR1_EL1 constant names Our standard is to include the _EL1 in the constant names for registers but we did not do that for ID_AA64PFR1_EL1; update it to do so in preparation for conversion to automatic generation. No functional change.
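[Illustration, not part of the patch: a minimal sketch of what the renamed shift constants encode, for readers following the series. The helper name example_sme_field() is invented, "pfr1" is assumed to hold a sanitised ID_AA64PFR1_EL1 value, and kernel types (u64) are assumed; compare the id_aa64pfr1_sme() hunk below, which does the same extraction via cpuid_feature_extract_unsigned_field().

	/*
	 * ID_AA64PFR1_EL1.SME is the 4-bit field at bits [27:24]
	 * (shift 24, width 4); a nonzero value means SME is present.
	 */
	static inline unsigned int example_sme_field(u64 pfr1)
	{
		return (pfr1 >> ID_AA64PFR1_EL1_SME_SHIFT) & 0xf;
	}
]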
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-8-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 ++-- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/sysreg.h | 34 ++++++++++----------- arch/arm64/kernel/cpufeature.c | 42 +++++++++++++------------- arch/arm64/kernel/hyp-stub.S | 2 +- arch/arm64/kernel/idreg-override.c | 6 ++-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 4 +-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2 +- arch/arm64/kvm/sys_regs.c | 4 +-- arch/arm64/mm/mmu.c | 2 +- arch/arm64/mm/proc.S | 4 +-- 12 files changed, 55 insertions(+), 55 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8ba9f1c07432..214325a7f627 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -624,16 +624,16 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) static inline bool id_aa64pfr1_sme(u64 pfr1) { - u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT); return val > 0; } static inline bool id_aa64pfr1_mte(u64 pfr1) { - u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); - return val >= ID_AA64PFR1_MTE; + return val >= ID_AA64PFR1_EL1_MTE; } void __init setup_cpu_features(void); diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index a011c87ec6e3..80ef55b66196 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -149,7 +149,7 @@ mov x0, xzr mrs x1, id_aa64pfr1_el1 - ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 + ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4 cbz x1, .Lset_fgt_\@ /* Disable nVHE traps of TPIDR2 and SMPRI */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06f93aa9abb1..e72bab4452e9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -714,23 +714,23 @@ #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64pfr1 */ -#define ID_AA64PFR1_SME_SHIFT 24 -#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_RASFRAC_SHIFT 12 -#define ID_AA64PFR1_MTE_SHIFT 8 -#define ID_AA64PFR1_SSBS_SHIFT 4 -#define ID_AA64PFR1_BT_SHIFT 0 - -#define ID_AA64PFR1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_BT_BTI 0x1 -#define ID_AA64PFR1_SME 1 - -#define ID_AA64PFR1_MTE_NI 0x0 -#define ID_AA64PFR1_MTE_EL0 0x1 -#define ID_AA64PFR1_MTE 0x2 -#define ID_AA64PFR1_MTE_ASYMM 0x3 +#define ID_AA64PFR1_EL1_SME_SHIFT 24 +#define ID_AA64PFR1_EL1_MPAMFRAC_SHIFT 16 +#define ID_AA64PFR1_EL1_RASFRAC_SHIFT 12 +#define ID_AA64PFR1_EL1_MTE_SHIFT 8 +#define ID_AA64PFR1_EL1_SSBS_SHIFT 4 +#define ID_AA64PFR1_EL1_BT_SHIFT 0 + +#define ID_AA64PFR1_EL1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS 2 +#define ID_AA64PFR1_EL1_BT_BTI 0x1 +#define ID_AA64PFR1_EL1_SME 1 + +#define ID_AA64PFR1_EL1_MTE_NI 0x0 +#define ID_AA64PFR1_EL1_MTE_EL0 0x1 +#define ID_AA64PFR1_EL1_MTE 0x2 +#define ID_AA64PFR1_EL1_MTE_ASYMM 0x3 /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_ECV_SHIFT 60 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3bda767af32d..2e19cbdab50a 
100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -264,14 +264,14 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAMFRAC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RASFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_PSTATE_NI), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2367,10 +2367,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_SSBS_SHIFT, + .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, - .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, + .min_field_value = ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY, }, #ifdef CONFIG_ARM64_CNP { @@ -2528,9 +2528,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .cpu_enable = bti_enable, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_BT_SHIFT, + .field_pos = ID_AA64PFR1_EL1_BT_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_BT_BTI, + .min_field_value = ID_AA64PFR1_EL1_BT_BTI, .sign = FTR_UNSIGNED, }, #endif @@ -2541,9 +2541,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_MTE_SHIFT, + .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_MTE, + .min_field_value = ID_AA64PFR1_EL1_MTE, .sign = FTR_UNSIGNED, .cpu_enable = cpu_enable_mte, }, @@ -2553,9 +2553,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_MTE_SHIFT, + .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_MTE_ASYMM, + .min_field_value = ID_AA64PFR1_EL1_MTE_ASYMM, .sign = FTR_UNSIGNED, }, #endif /* CONFIG_ARM64_MTE */ @@ -2577,9 +2577,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_SME, .sys_reg = SYS_ID_AA64PFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR1_SME_SHIFT, + .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_SME, + .min_field_value = ID_AA64PFR1_EL1_SME, .matches = has_cpuid_feature, .cpu_enable = sme_kernel_enable, }, @@ -2739,24 
+2739,24 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), #endif - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_BTI - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), #endif #ifdef CONFIG_ARM64_PTR_AUTH HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), #endif #ifdef CONFIG_ARM64_MTE - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index f0644e945117..bce1f5f6b8c9 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) msr_s SYS_ZCR_EL2, x1 // length for EL1. 
.Lskip_sve: - check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme + check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme .Linit_sme: /* SME register access and priority mapping */ mrs x0, cptr_el2 // Disable SME traps diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 7b90a9b4cc0a..8c474915a11d 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = { .name = "id_aa64pfr1", .override = &id_aa64pfr1_override, .fields = { - FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ), - FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL), - FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter), + FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ), + FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL), + FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter), {} }, }; diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index d94fb45a0e34..fad5406fc71a 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -62,8 +62,8 @@ * - Speculative Store Bypassing */ #define PVM_ID_AA64PFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \ - ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \ + ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \ + ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index d1fa03e2a449..05301d3b3fc2 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) u64 hcr_clear = 0; /* Memory Tagging: Trap and Treat as Untagged if not supported. 
*/ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) { hcr_set |= HCR_TID5; hcr_clear |= HCR_DCT | HCR_ATA; } diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 2ebf93336437..0f9ac25afdf4 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; if (!kvm_has_mte(kvm)) - allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); return id_aa64pfr1_el1_sys_val & allow_mask; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index cf1fc616e00d..ff4405a6ea25 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1090,9 +1090,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64PFR1_EL1: if (!kvm_has_mte(vcpu->kvm)) - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e7ad44585f40..5810eddfb48e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void) pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1); return cpuid_feature_extract_unsigned_field(pfr1, - ID_AA64PFR1_BT_SHIFT); + ID_AA64PFR1_EL1_BT_SHIFT); } /* diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7837a69524c5..15539da36bc3 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup) * (ID_AA64PFR1_EL1[11:8] > 1). */ mrs x10, ID_AA64PFR1_EL1 - ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 - cmp x10, #ID_AA64PFR1_MTE + ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4 + cmp x10, #ID_AA64PFR1_EL1_MTE b.lt 1f /* Normal Tagged memory type at the corresponding MAIR index */ -- cgit v1.2.3 From ed7c138d6f82a9e75f27c9ff43262ba8158533b0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:05 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64MMFR0_EL1.BigEnd For some reason we refer to ID_AA64MMFR0_EL1.BigEnd as BIGENDEL. Remove the EL from the name, bringing the naming into sync with DDI0487H.a. Due to the large amount of MixedCase in this register, which isn't really consistent with either the kernel style or the majority of the architecture, the use of upper case is preserved. No functional changes.
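[Illustration, not part of the patch: a minimal sketch of the check this rename feeds, with kernel types assumed. The name example_mixed_endian() is invented and "mmfr0" is assumed to hold a sanitised ID_AA64MMFR0_EL1 value; the system_supports_mixed_endian() hunk below performs the equivalent extraction.

	/* BigEnd == 0b0001 means mixed-endian accesses are supported */
	static bool example_mixed_endian(u64 mmfr0)
	{
		return ((mmfr0 >> ID_AA64MMFR0_EL1_BIGEND_SHIFT) & 0xf) == 0x1;
	}
]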
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-9-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 214325a7f627..d7b96dc9364b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -597,7 +597,7 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) { - return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT) == 0x1 || + return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 || cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1; } @@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_EL1_BIGENDEL_SHIFT); + ID_AA64MMFR0_EL1_BIGEND_SHIFT); return val == 0x1; } diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e72bab4452e9..f1430c77911a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -744,7 +744,7 @@ #define ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20 #define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 #define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_EL1_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 #define ID_AA64MMFR0_EL1_ASID_SHIFT 4 #define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2e19cbdab50a..def03583523b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -351,7 +351,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index fad5406fc71a..0ece26707fc0 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -74,7 +74,7 @@ * - Non-context synchronizing exception entry and exit */ #define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ -- cgit v1.2.3 From 07d7d848b96b57eccafeafad023d02c7f627d494 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:06 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64MMFR0_EL1.ASIDBits For some reason we 
refer to ID_AA64MMFR0_EL1.ASIDBits as ASID. Add BITS into the name, bringing the naming into sync with DDI0487H.a. Due to the large amount of MixedCase in this register, which isn't really consistent with either the kernel style or the majority of the architecture, the use of upper case is preserved. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-10-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 6 +++--- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- arch/arm64/mm/context.c | 6 +++--- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f1430c77911a..b6cd9996e12b 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -745,11 +745,11 @@ #define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 #define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 #define ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 -#define ID_AA64MMFR0_EL1_ASID_SHIFT 4 +#define ID_AA64MMFR0_EL1_ASIDBITS_SHIFT 4 #define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 -#define ID_AA64MMFR0_EL1_ASID_8 0x0 -#define ID_AA64MMFR0_EL1_ASID_16 0x2 +#define ID_AA64MMFR0_EL1_ASIDBITS_8 0x0 +#define ID_AA64MMFR0_EL1_ASIDBITS_16 0x2 #define ID_AA64MMFR0_EL1_TGRAN4_NI 0xf #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index def03583523b..ba44f67c5544 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -352,7 +352,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0ece26707fc0..0c2e474d0c9e 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -87,7 +87,7 @@ */ #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASID), ID_AA64MMFR0_EL1_ASID_16) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \ ) /* diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8f38a5452d05..e1e0dca01839 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void) { u32 asid; int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), - ID_AA64MMFR0_EL1_ASID_SHIFT); + ID_AA64MMFR0_EL1_ASIDBITS_SHIFT); switch (fld) { default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); fallthrough; - case ID_AA64MMFR0_EL1_ASID_8: + case ID_AA64MMFR0_EL1_ASIDBITS_8: asid = 8; break; - case ID_AA64MMFR0_EL1_ASID_16: + case
ID_AA64MMFR0_EL1_ASIDBITS_16: asid = 16; } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index da67a75cdaad..5968a568aae2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -431,7 +431,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) return false; /* We can support bigger ASIDs than the CPU, but not smaller */ - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASID_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT); asid_bits = fld ? 16 : 8; if (smmu->asid_bits < asid_bits) return false; -- cgit v1.2.3 From 6fcd019359028f3c6477508b329d69e27f41d895 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 5 Sep 2022 23:54:07 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64MMFR1_EL1 fields In preparation for converting the ID_AA64MMFR1_EL1 system register defines to automatic generation, rename them to follow the conventions used by other automatically generated registers: * Add _EL1 in the register name. * Rename fields to match the names in the ARM ARM: * LOR -> LO * HPD -> HPDS * VHE -> VH * HADBS -> HAFDBS * SPECSEI -> SpecSEI * VMIDBITS -> VMIDBits There should be no functional change as a result of this patch. Signed-off-by: Kristina Martsenko Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-11-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 8 +++--- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/sysreg.h | 40 +++++++++++++------------- arch/arm64/kernel/cpufeature.c | 36 +++++++++++------------ arch/arm64/kernel/hyp-stub.S | 4 +-- arch/arm64/kernel/idreg-override.c | 2 +- arch/arm64/kernel/proton-pack.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 12 ++++---- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/sys_regs.c | 2 +- 10 files changed, 55 insertions(+), 55 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index d7b96dc9364b..5fc43f7f3ed6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void) mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_HADBS_SHIFT); + ID_AA64MMFR1_EL1_HAFDBS_SHIFT); } static inline bool cpu_has_pan(void) { u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_PAN_SHIFT); + ID_AA64MMFR1_EL1_PAN_SHIFT); } #ifdef CONFIG_ARM64_AMU_EXTN @@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) int vmid_bits; vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_VMIDBITS_SHIFT); - if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16) + ID_AA64MMFR1_EL1_VMIDBits_SHIFT); + if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16) return 16; /* diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 80ef55b66196..b6e9bea7c9ec 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -83,7 +83,7 @@ /* LORegions */ .macro __init_el2_lor mrs x1, id_aa64mmfr1_el1 - ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 + ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4 cbz x0, .Lskip_lor_\@ msr_s SYS_LORC_EL1, xzr .Lskip_lor_\@: diff --git a/arch/arm64/include/asm/sysreg.h 
b/arch/arm64/include/asm/sysreg.h index b6cd9996e12b..410b628fbb67 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -783,26 +783,26 @@ #endif /* id_aa64mmfr1 */ -#define ID_AA64MMFR1_ECBHB_SHIFT 60 -#define ID_AA64MMFR1_TIDCP1_SHIFT 52 -#define ID_AA64MMFR1_HCX_SHIFT 40 -#define ID_AA64MMFR1_AFP_SHIFT 44 -#define ID_AA64MMFR1_ETS_SHIFT 36 -#define ID_AA64MMFR1_TWED_SHIFT 32 -#define ID_AA64MMFR1_XNX_SHIFT 28 -#define ID_AA64MMFR1_SPECSEI_SHIFT 24 -#define ID_AA64MMFR1_PAN_SHIFT 20 -#define ID_AA64MMFR1_LOR_SHIFT 16 -#define ID_AA64MMFR1_HPD_SHIFT 12 -#define ID_AA64MMFR1_VHE_SHIFT 8 -#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 -#define ID_AA64MMFR1_HADBS_SHIFT 0 - -#define ID_AA64MMFR1_VMIDBITS_8 0 -#define ID_AA64MMFR1_VMIDBITS_16 2 - -#define ID_AA64MMFR1_TIDCP1_NI 0 -#define ID_AA64MMFR1_TIDCP1_IMP 1 +#define ID_AA64MMFR1_EL1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_EL1_TIDCP1_SHIFT 52 +#define ID_AA64MMFR1_EL1_HCX_SHIFT 40 +#define ID_AA64MMFR1_EL1_AFP_SHIFT 44 +#define ID_AA64MMFR1_EL1_ETS_SHIFT 36 +#define ID_AA64MMFR1_EL1_TWED_SHIFT 32 +#define ID_AA64MMFR1_EL1_XNX_SHIFT 28 +#define ID_AA64MMFR1_EL1_SpecSEI_SHIFT 24 +#define ID_AA64MMFR1_EL1_PAN_SHIFT 20 +#define ID_AA64MMFR1_EL1_LO_SHIFT 16 +#define ID_AA64MMFR1_EL1_HPDS_SHIFT 12 +#define ID_AA64MMFR1_EL1_VH_SHIFT 8 +#define ID_AA64MMFR1_EL1_VMIDBits_SHIFT 4 +#define ID_AA64MMFR1_EL1_HAFDBS_SHIFT 0 + +#define ID_AA64MMFR1_EL1_VMIDBits_8 0 +#define ID_AA64MMFR1_EL1_VMIDBits_16 2 + +#define ID_AA64MMFR1_EL1_TIDCP1_NI 0 +#define ID_AA64MMFR1_EL1_TIDCP1_IMP 1 /* id_aa64mmfr2 */ #define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ba44f67c5544..534819afadd5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -362,18 +362,18 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, 
FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2116,7 +2116,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR1_EL1, - .field_pos = ID_AA64MMFR1_PAN_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2130,7 +2130,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR1_EL1, - .field_pos = ID_AA64MMFR1_PAN_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 3, @@ -2344,7 +2344,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HW_DBM, .sys_reg = SYS_ID_AA64MMFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR1_HADBS_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT, .field_width = 4, .min_field_value = 2, .matches = has_hw_dbm, @@ -2614,9 +2614,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .sys_reg = SYS_ID_AA64MMFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR1_TIDCP1_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT, .field_width = 4, - .min_field_value = ID_AA64MMFR1_TIDCP1_IMP, + .min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP, .matches = has_cpuid_feature, .cpu_enable = cpu_trap_el0_impdef, }, @@ -2752,7 +2752,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), - HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index bce1f5f6b8c9..2ee18c860f2a 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? 
- ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4 + ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 cbz x1, .Lskip_sme mrs_s x1, SYS_HCRX_EL2 @@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) tbnz x1, #0, 1f // Needs to be VHE capable, obviously - check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f + check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f 1: mov_q x0, HVC_STUB_ERR eret diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 8c474915a11d..95133765ed29 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = { .name = "id_aa64mmfr1", .override = &id_aa64mmfr1_override, .fields = { - FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter), + FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter), {} }, }; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 6ee586b4e235..fe3bc4c1c5ac 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope) mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_ECBHB_SHIFT); + ID_AA64MMFR1_EL1_ECBHB_SHIFT); } bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0c2e474d0c9e..1653299ff8f8 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -100,12 +100,12 @@ * - Enhanced Translation Synchronization */ #define PVM_ID_AA64MMFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 05301d3b3fc2..b92ecdd6bdab 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) u64 hcr_set = 0; /* Trap LOR */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids)) hcr_set |= HCR_TLOR; vcpu->arch.hcr_el2 |= hcr_set; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ff4405a6ea25..fa61793467a7 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); u32 sr = reg_to_encoding(r); - if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { + if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) { kvm_inject_undefined(vcpu); return false; } -- cgit v1.2.3 From 8f40baded4a14ef56da0b73e028117b146ca4584 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:08 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64MMFR2_EL1.VARange The kernel refers to ID_AA64MMFR2_EL1.VARange as LVA. 
In preparation for automatic generation of defines for the system registers bring the naming used by the kernel in sync with that of DDI0487H.a. No functional change. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-12-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/head.S | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 48c7963abaf3..c1fc5f7bb978 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -612,7 +612,7 @@ alternative_endif .macro offset_ttbr1, ttbr, tmp #ifdef CONFIG_ARM64_VA_BITS_52 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 - and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) + and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT) cbnz \tmp, .Lskipoffs_\@ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET .Lskipoffs_\@ : diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 410b628fbb67..c80f1f7a10f1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -815,7 +815,7 @@ #define ID_AA64MMFR2_EL1_ST_SHIFT 28 #define ID_AA64MMFR2_EL1_NV_SHIFT 24 #define ID_AA64MMFR2_EL1_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_EL1_LVA_SHIFT 16 +#define ID_AA64MMFR2_EL1_VARange_SHIFT 16 #define ID_AA64MMFR2_EL1_IESB_SHIFT 12 #define ID_AA64MMFR2_EL1_LSM_SHIFT 8 #define ID_AA64MMFR2_EL1_UAO_SHIFT 4 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 534819afadd5..f927b4451613 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -388,7 +388,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LVA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d040f57d3496..b5accf53a153 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry) */ #if VA_BITS > 48 mrs_s x0, SYS_ID_AA64MMFR2_EL1 - tst x0, #0xf << ID_AA64MMFR2_EL1_LVA_SHIFT + tst x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT mov x0, #VA_BITS mov x25, #VA_BITS_MIN csel x25, x25, x0, eq @@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva) b.ne 2f mrs_s x0, SYS_ID_AA64MMFR2_EL1 - and x0, x0, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) + and x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT) cbnz x0, 2f update_early_cpu_boot_status \ -- cgit v1.2.3 From ca951862ad3e6fa33d8ba8220b80f3fdc496821c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:09 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64MMFR2_EL1.CnP The kernel refers to ID_AA64MMFR2_EL1.CnP as 
CNP. In preparation for automatic generation of defines for the system registers bring the naming used by the kernel in sync with that of DDI0487H.a. No functional change. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-13-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 4 ++-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c80f1f7a10f1..7795a043a8ff 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -819,7 +819,7 @@ #define ID_AA64MMFR2_EL1_IESB_SHIFT 12 #define ID_AA64MMFR2_EL1_LSM_SHIFT 8 #define ID_AA64MMFR2_EL1_UAO_SHIFT 4 -#define ID_AA64MMFR2_EL1_CNP_SHIFT 0 +#define ID_AA64MMFR2_EL1_CnP_SHIFT 0 /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f927b4451613..2de9b28ee84d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -392,7 +392,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2380,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_useable_cnp, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_EL1_CNP_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT, .field_width = 4, .min_field_value = 1, .cpu_enable = cpu_enable_cnp, diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 1653299ff8f8..0ba290e1a791 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -120,7 +120,7 @@ * - E0PDx mechanism */ #define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ -- cgit v1.2.3 From 4f8456c3199dab2436dab1f8ec17cba853fa1060 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:10 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64PFR0_EL1 constants We generally use the suffix _IMP for the baseline "feature implemented" value, so in preparation for automatic generation of register defines update those for ID_AA64PFR0_EL1 to reflect this. In the case of ASIMD we don't actually use the define so just remove it. No functional changes.
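[Illustration, not part of the patch: how the renamed _IMP constants are consumed, sketched minimally with kernel types. The name example_has_sve() is invented and "pfr0" is assumed to hold a sanitised ID_AA64PFR0_EL1 value; the ARM64_SVE capability in the diff below matches the same way, via .min_field_value = ID_AA64PFR0_EL1_SVE_IMP with FTR_UNSIGNED.

	/* SVE is usable once the unsigned SVE field reaches the baseline */
	static bool example_has_sve(u64 pfr0)
	{
		unsigned int fld = (pfr0 >> ID_AA64PFR0_EL1_SVE_SHIFT) & 0xf;

		return fld >= ID_AA64PFR0_EL1_SVE_IMP;
	}
]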
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-14-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 9 ++++----- arch/arm64/kernel/cpufeature.c | 8 ++++---- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 7795a043a8ff..c2fffb863f08 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,14 +702,13 @@ #define ID_AA64PFR0_EL1_EL1_SHIFT 4 #define ID_AA64PFR0_EL1_EL0_SHIFT 0 -#define ID_AA64PFR0_EL1_AMU 0x1 -#define ID_AA64PFR0_EL1_SVE 0x1 -#define ID_AA64PFR0_EL1_RAS_V1 0x1 +#define ID_AA64PFR0_EL1_AMU_IMP 0x1 +#define ID_AA64PFR0_EL1_SVE_IMP 0x1 +#define ID_AA64PFR0_EL1_RAS_IMP 0x1 #define ID_AA64PFR0_EL1_RAS_V1P1 0x2 #define ID_AA64PFR0_EL1_FP_NI 0xf -#define ID_AA64PFR0_EL1_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_FP_IMP 0x0 #define ID_AA64PFR0_EL1_ASIMD_NI 0xf -#define ID_AA64PFR0_EL1_ASIMD_SUPPORTED 0x0 #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2de9b28ee84d..43afa9a1cd73 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2246,7 +2246,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_SVE, + .min_field_value = ID_AA64PFR0_EL1_SVE_IMP, .matches = has_cpuid_feature, .cpu_enable = sve_kernel_enable, }, @@ -2261,7 +2261,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_RAS_V1, + .min_field_value = ID_AA64PFR0_EL1_RAS_IMP, .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ @@ -2280,7 +2280,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_AMU, + .min_field_value = ID_AA64PFR0_EL1_AMU_IMP, .cpu_enable = cpu_amu_enable, }, #endif /* CONFIG_ARM64_AMU_EXTN */ @@ -2727,7 +2727,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0ba290e1a791..6200d53600ba 100644 --- 
a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -53,7 +53,7 @@ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_V1) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \ ) /* -- cgit v1.2.3 From 5620b4b0371569b9ba65b756cd6d5fc6af47ddd6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:11 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64PFR0_EL1.AdvSIMD constants The architecture refers to the register field identifying advanced SIMD as AdvSIMD but the kernel refers to it as ASIMD. Use the architecture's naming. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-15-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++-- arch/arm64/kernel/cpufeature.c | 6 +++--- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c2fffb863f08..78087b1a3ca4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -695,7 +695,7 @@ #define ID_AA64PFR0_EL1_SVE_SHIFT 32 #define ID_AA64PFR0_EL1_RAS_SHIFT 28 #define ID_AA64PFR0_EL1_GIC_SHIFT 24 -#define ID_AA64PFR0_EL1_ASIMD_SHIFT 20 +#define ID_AA64PFR0_EL1_AdvSIMD_SHIFT 20 #define ID_AA64PFR0_EL1_FP_SHIFT 16 #define ID_AA64PFR0_EL1_EL3_SHIFT 12 #define ID_AA64PFR0_EL1_EL2_SHIFT 8 @@ -708,7 +708,7 @@ #define ID_AA64PFR0_EL1_RAS_V1P1 0x2 #define ID_AA64PFR0_EL1_FP_NI 0xf #define ID_AA64PFR0_EL1_FP_IMP 0x0 -#define ID_AA64PFR0_EL1_ASIMD_NI 0xf +#define ID_AA64PFR0_EL1_AdvSIMD_NI 0xf #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 43afa9a1cd73..1610b35229e4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -253,7 +253,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, ID_AA64PFR0_EL1_ASIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), @@ -2710,8 +2710,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, 
ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 6200d53600ba..07edfc7524c9 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -36,7 +36,7 @@ */ #define PVM_ID_AA64PFR0_ALLOW (\ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \ ) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index b92ecdd6bdab..fc3e32709ba2 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -31,7 +31,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) */ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), PVM_ID_AA64PFR0_ALLOW)); - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), PVM_ID_AA64PFR0_ALLOW)); /* Trap RAS unless all current versions are supported */ -- cgit v1.2.3 From 53275da8dccc8f8140f24e2634c5d61e3206307d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:12 +0100 Subject: arm64/sysreg: Standardise naming for SSBS feature enumeration In preparation for conversion to automatic generation refresh the names given to the items in the SSBS feature enumeration to reflect our standard pattern for naming, corresponding to the architecture feature names they reflect. No functional change. 
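To illustrate what the renamed values stand for (an illustrative sketch, not part of the patch; the wrapper function is hypothetical, the constants are those introduced below):

/* Sketch only: the architected levels of FEAT_SSBS. */
static const char *example_ssbs_level(u64 pfr1)
{
	switch (cpuid_feature_extract_unsigned_field(pfr1,
						     ID_AA64PFR1_EL1_SSBS_SHIFT)) {
	case ID_AA64PFR1_EL1_SSBS_NI:
		return "not implemented";
	case ID_AA64PFR1_EL1_SSBS_IMP:
		return "PSTATE.SSBS only";			/* FEAT_SSBS */
	case ID_AA64PFR1_EL1_SSBS_SSBS2:
		return "PSTATE.SSBS plus MSR/MRS accessors";	/* FEAT_SSBS2 */
	default:
		return "unknown";
	}
}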
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-16-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 10 +++++----- arch/arm64/kernel/cpufeature.c | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 78087b1a3ca4..d6df8fe8e61e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -720,11 +720,11 @@ #define ID_AA64PFR1_EL1_SSBS_SHIFT 4 #define ID_AA64PFR1_EL1_BT_SHIFT 0 -#define ID_AA64PFR1_EL1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_EL1_BT_BTI 0x1 -#define ID_AA64PFR1_EL1_SME 1 +#define ID_AA64PFR1_EL1_SSBS_NI 0 +#define ID_AA64PFR1_EL1_SSBS_IMP 1 +#define ID_AA64PFR1_EL1_SSBS_SSBS2 2 +#define ID_AA64PFR1_EL1_BT_BTI 0x1 +#define ID_AA64PFR1_EL1_SME 1 #define ID_AA64PFR1_EL1_MTE_NI 0x0 #define ID_AA64PFR1_EL1_MTE_EL0 0x1 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 1610b35229e4..7e58cb5b9185 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -269,7 +269,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RASFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_PSTATE_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0), ARM64_FTR_END, @@ -2370,7 +2370,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, - .min_field_value = ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY, + .min_field_value = ID_AA64PFR1_EL1_SSBS_IMP, }, #ifdef CONFIG_ARM64_CNP { @@ -2739,7 +2739,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), #endif - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_BTI HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), #endif -- cgit v1.2.3 From 2e75b393ff2e45a32e9621e1b27cd7854122c1c8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:13 +0100 Subject: arm64/sysreg: Standardise naming for MTE feature enumeration In preparation for conversion to automatic generation refresh the names given to the items in the MTE feature enumeration to reflect our standard pattern for naming, corresponding to the architecture feature names they reflect. No functional change.
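From the consumer side nothing changes, since userspace sees only the hwcaps; for orientation, a sketch (not part of the patch) of a typical opt-in sequence, assuming recent UAPI headers for the HWCAP2_MTE and PR_MTE_* constants:

#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>

/* Sketch only: enable synchronous tag checking when MTE is advertised. */
static int example_enable_mte(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_MTE))
		return -1;

	return prctl(PR_SET_TAGGED_ADDR_CTRL,
		     PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0);
}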
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-17-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/sysreg.h | 6 +++--- arch/arm64/kernel/cpufeature.c | 8 ++++---- arch/arm64/mm/proc.S | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5fc43f7f3ed6..79bb9e58d9c6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -633,7 +633,7 @@ static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); - return val >= ID_AA64PFR1_EL1_MTE; + return val >= ID_AA64PFR1_EL1_MTE_MTE2; } void __init setup_cpu_features(void); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d6df8fe8e61e..385242a6e380 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -727,9 +727,9 @@ #define ID_AA64PFR1_EL1_SME 1 #define ID_AA64PFR1_EL1_MTE_NI 0x0 -#define ID_AA64PFR1_EL1_MTE_EL0 0x1 -#define ID_AA64PFR1_EL1_MTE 0x2 -#define ID_AA64PFR1_EL1_MTE_ASYMM 0x3 +#define ID_AA64PFR1_EL1_MTE_IMP 0x1 +#define ID_AA64PFR1_EL1_MTE_MTE2 0x2 +#define ID_AA64PFR1_EL1_MTE_MTE3 0x3 /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_ECV_SHIFT 60 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7e58cb5b9185..2afc0a852359 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2543,7 +2543,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64PFR1_EL1, .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_EL1_MTE, + .min_field_value = ID_AA64PFR1_EL1_MTE_MTE2, .sign = FTR_UNSIGNED, .cpu_enable = cpu_enable_mte, }, @@ -2555,7 +2555,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64PFR1_EL1, .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_EL1_MTE_ASYMM, + .min_field_value = ID_AA64PFR1_EL1_MTE_MTE3, .sign = FTR_UNSIGNED, }, #endif /* CONFIG_ARM64_MTE */ @@ -2748,8 +2748,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), #endif #ifdef CONFIG_ARM64_MTE - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 15539da36bc3..5f7784ee6044 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -435,7 +435,7 @@ SYM_FUNC_START(__cpu_setup) */ mrs x10, ID_AA64PFR1_EL1 ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4 - cmp x10, 
#ID_AA64PFR1_EL1_MTE + cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2 b.lt 1f /* Normal Tagged memory type at the corresponding MAIR index */ -- cgit v1.2.3 From cf7fdbbe83a765e8ad2e0a556ec2c4e675bc3893 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:14 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 fractional version fields The naming for fractional versions fields in ID_AA64PFR1_EL1 does not align with that in the architecture, lacking underscores and using upper case where the architecture uses lower case. In preparation for automatic generation of defines bring the code in sync with the architecture, no functional change. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-18-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++-- arch/arm64/kernel/cpufeature.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 385242a6e380..aa1e970eddd5 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -714,8 +714,8 @@ /* id_aa64pfr1 */ #define ID_AA64PFR1_EL1_SME_SHIFT 24 -#define ID_AA64PFR1_EL1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_EL1_RASFRAC_SHIFT 12 +#define ID_AA64PFR1_EL1_MPAM_frac_SHIFT 16 +#define ID_AA64PFR1_EL1_RAS_frac_SHIFT 12 #define ID_AA64PFR1_EL1_MTE_SHIFT 8 #define ID_AA64PFR1_EL1_SSBS_SHIFT 4 #define ID_AA64PFR1_EL1_BT_SHIFT 0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2afc0a852359..636f6b207ef6 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -265,8 +265,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAMFRAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RASFRAC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI), -- cgit v1.2.3 From 514e9b2aed04e86c93363b69318e23b19fb9ef78 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:15 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 BTI enumeration In preparation for automatic generation of constants update the define for BTI being implemented to the convention we are using, no functional change. 
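For context, the hwcap driven by this field gates userspace use of FEAT_BTI; an illustrative sketch follows (not part of the patch; PROT_BTI comes from the arm64 UAPI <asm/mman.h> and is defined locally here only to keep the example self-contained):

#include <sys/auxv.h>
#include <sys/mman.h>
#include <asm/hwcap.h>

#ifndef PROT_BTI
#define PROT_BTI 0x10	/* arm64 UAPI value */
#endif

/* Sketch only: map executable memory with BTI guarding when available. */
static void *example_map_code(size_t len)
{
	int prot = PROT_READ | PROT_EXEC;

	if (getauxval(AT_HWCAP2) & HWCAP2_BTI)
		prot |= PROT_BTI;

	return mmap(NULL, len, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}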
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-19-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index aa1e970eddd5..05401a9b4709 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -723,7 +723,7 @@ #define ID_AA64PFR1_EL1_SSBS_NI 0 #define ID_AA64PFR1_EL1_SSBS_IMP 1 #define ID_AA64PFR1_EL1_SSBS_SSBS2 2 -#define ID_AA64PFR1_EL1_BT_BTI 0x1 +#define ID_AA64PFR1_EL1_BT_IMP 0x1 #define ID_AA64PFR1_EL1_SME 1 #define ID_AA64PFR1_EL1_MTE_NI 0x0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 636f6b207ef6..5bf158750e02 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2530,7 +2530,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64PFR1_EL1, .field_pos = ID_AA64PFR1_EL1_BT_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_EL1_BT_BTI, + .min_field_value = ID_AA64PFR1_EL1_BT_IMP, .sign = FTR_UNSIGNED, }, #endif @@ -2741,7 +2741,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { #endif HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_BTI - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), #endif #ifdef CONFIG_ARM64_PTR_AUTH HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), -- cgit v1.2.3 From ed9075201c5a8e89afa969e2bbe947a5ea9c4620 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:16 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 SME enumeration In preparation for automatic generation of constants update the define for SME being implemented to the convention we are using, no functional change. 
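Likewise for context, the SME hwcap derived from this define is what userspace keys off before touching streaming state; a sketch (not part of the patch), assuming the PR_SME_* prctl constants from recent kernel headers:

#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>

/* Sketch only: query the streaming vector length in bytes once SME is advertised. */
static long example_sme_vl_bytes(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
		return -1;

	return prctl(PR_SME_GET_VL) & PR_SME_VL_LEN_MASK;
}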
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-20-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 05401a9b4709..99a8c433db23 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -724,7 +724,7 @@ #define ID_AA64PFR1_EL1_SSBS_IMP 1 #define ID_AA64PFR1_EL1_SSBS_SSBS2 2 #define ID_AA64PFR1_EL1_BT_IMP 0x1 -#define ID_AA64PFR1_EL1_SME 1 +#define ID_AA64PFR1_EL1_SME_IMP 1 #define ID_AA64PFR1_EL1_MTE_NI 0x0 #define ID_AA64PFR1_EL1_MTE_IMP 0x1 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 5bf158750e02..c2e42feb3e1a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2579,7 +2579,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_EL1_SME, + .min_field_value = ID_AA64PFR1_EL1_SME_IMP, .matches = has_cpuid_feature, .cpu_enable = sme_kernel_enable, }, @@ -2756,7 +2756,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), -- cgit v1.2.3 From cfaa32108aeaf2f4137e4eb01c82e6d37a732b07 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:17 +0100 Subject: arm64/sysreg: Convert HCRX_EL2 to automatic generation Convert HCRX_EL2 to be automatically generated as per DDI0487H.a, no functional changes.
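With the conversion, both the register encoding and the field masks come from the generated header; a sketch of a hypothetical EL2 caller (the field name is assumed to follow the generator's <reg>_<field> convention; nothing in this patch adds such a user):

/* Sketch only: set HCRX_EL2.SMPME (streaming mode priority mapping enable). */
static void example_enable_smpme(void)
{
	u64 hcrx = read_sysreg_s(SYS_HCRX_EL2);

	write_sysreg_s(hcrx | HCRX_EL2_SMPME, SYS_HCRX_EL2);
}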
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-21-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ---- arch/arm64/tools/sysreg | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 99a8c433db23..74690363ae39 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -533,7 +533,6 @@ #define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) -#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) @@ -1023,9 +1022,6 @@ #define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_E0TRE BIT(0) -/* HCRX_EL2 definitions */ -#define HCRX_EL2_SMPME_MASK (1 << 5) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 185bc5b0faf7..746d4d40133e 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -516,6 +516,22 @@ Sysreg ZCR_EL2 3 4 1 2 0 Fields ZCR_ELx EndSysreg +Sysreg HCRX_EL2 3 4 1 2 2 +Res0 63:12 +Field 11 MSCEn +Field 10 MCE2 +Field 9 CMOW +Field 8 VFNMI +Field 7 VINMI +Field 6 TALLINT +Field 5 SMPME +Field 4 FGTnXS +Field 3 FnXS +Field 2 EnASR +Field 1 EnALS +Field 0 EnAS0 +EndSysreg + Sysreg SMPRIMAP_EL2 3 4 1 2 5 Field 63:60 P15 Field 59:56 P14 -- cgit v1.2.3 From 0b7ed4d8f59c252c7b0339947f69da6770979c0a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:18 +0100 Subject: arm64/sysreg: Convert ID_AA64MMFR0_EL1 to automatic generation Automatically generate most of the defines for ID_AA64MMFR0_EL1 mostly as per DDI0487H.a. Due to the large amount of MixedCase in this register which isn't really consistent with either the kernel style or the majority of the architecture the use of upper case is preserved. We also leave in place a number of min/max/default value definitions which don't flow from the architecture definitions. No functional changes. 
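To make the conversion concrete: from the Enum descriptions added below, the generator re-emits constants equivalent to the hand-written ones this patch deletes, e.g. for PARANGE roughly as follows (a sketch of the expected output; the exact formatting is gen-sysreg.awk's):

#define ID_AA64MMFR0_EL1_PARANGE_SHIFT	0
#define ID_AA64MMFR0_EL1_PARANGE_32	0x0
#define ID_AA64MMFR0_EL1_PARANGE_36	0x1
#define ID_AA64MMFR0_EL1_PARANGE_40	0x2
#define ID_AA64MMFR0_EL1_PARANGE_42	0x3
#define ID_AA64MMFR0_EL1_PARANGE_44	0x4
#define ID_AA64MMFR0_EL1_PARANGE_48	0x5
#define ID_AA64MMFR0_EL1_PARANGE_52	0x6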
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-22-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 30 ----------------- arch/arm64/tools/sysreg | 73 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 30 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 74690363ae39..787d9fa3c8e0 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -199,7 +199,6 @@ #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) -#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) #define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) @@ -731,42 +730,13 @@ #define ID_AA64PFR1_EL1_MTE_MTE3 0x3 /* id_aa64mmfr0 */ -#define ID_AA64MMFR0_EL1_ECV_SHIFT 60 -#define ID_AA64MMFR0_EL1_FGT_SHIFT 56 -#define ID_AA64MMFR0_EL1_EXS_SHIFT 44 -#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT 40 -#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT 36 -#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT 32 -#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT 28 -#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT 24 -#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20 -#define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 -#define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 -#define ID_AA64MMFR0_EL1_ASIDBITS_SHIFT 4 -#define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 - -#define ID_AA64MMFR0_EL1_ASIDBITS_8 0x0 -#define ID_AA64MMFR0_EL1_ASIDBITS_16 0x2 - -#define ID_AA64MMFR0_EL1_TGRAN4_NI 0xf #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_EL1_TGRAN64_NI 0xf #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_EL1_TGRAN16_NI 0x0 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf -#define ID_AA64MMFR0_EL1_PARANGE_32 0x0 -#define ID_AA64MMFR0_EL1_PARANGE_36 0x1 -#define ID_AA64MMFR0_EL1_PARANGE_40 0x2 -#define ID_AA64MMFR0_EL1_PARANGE_42 0x3 -#define ID_AA64MMFR0_EL1_PARANGE_44 0x4 -#define ID_AA64MMFR0_EL1_PARANGE_48 0x5 -#define ID_AA64MMFR0_EL1_PARANGE_52 0x6 - #define ARM64_MIN_PARANGE_BITS 32 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 746d4d40133e..c1d800c0d4d5 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -315,6 +315,79 @@ Enum 3:0 WFxT EndEnum EndSysreg +Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0 +Enum 63:60 ECV + 0b0000 NI + 0b0001 IMP + 0b0010 CNTPOFF +EndEnum +Enum 59:56 FGT + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 55:48 +Enum 47:44 EXS + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 43:40 TGRAN4_2 + 0b0000 TGRAN4 + 0b0001 NI + 0b0010 IMP + 0b0011 52_BIT +EndEnum +Enum 39:36 TGRAN64_2 + 0b0000 TGRAN64 + 0b0001 NI + 0b0010 IMP +EndEnum +Enum 35:32 TGRAN16_2 + 0b0000 TGRAN16 + 0b0001 NI + 0b0010 IMP + 0b0011 52_BIT +EndEnum +Enum 31:28 TGRAN4 + 0b0000 IMP + 0b0001 52_BIT + 0b1111 NI +EndEnum +Enum 27:24 TGRAN64 + 0b0000 IMP + 0b1111 NI +EndEnum +Enum 23:20 TGRAN16 + 0b0000 NI + 0b0001 IMP + 0b0010 52_BIT +EndEnum +Enum 19:16 BIGENDEL0 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 15:12 SNSMEM + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 11:8 BIGEND + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 7:4 ASIDBITS + 0b0000 8 + 0b0010 16 +EndEnum +Enum 3:0 PARANGE + 0b0000 32 + 0b0001 36 + 0b0010 40 + 0b0011 
42 + 0b0100 44 + 0b0101 48 + 0b0110 52 +EndEnum +EndSysreg + Sysreg SCTLR_EL1 3 0 1 0 0 Field 63 TIDCP Field 62 SPINMASK -- cgit v1.2.3 From 7d751b313dd9b4cb3a33812cf827decb48352d0b Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 5 Sep 2022 23:54:19 +0100 Subject: arm64/sysreg: Convert ID_AA64MMFR1_EL1 to automatic generation Convert ID_AA64MMFR1_EL1 to be automatically generated as per DDI0487H.a plus ECBHB which was RES0 in DDI0487H.a but has been subsequently defined and is already present in mainline. No functional changes. Signed-off-by: Kristina Martsenko Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-23-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 23 ------------- arch/arm64/tools/sysreg | 71 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 23 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 787d9fa3c8e0..5dcd8dff53b3 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -199,7 +199,6 @@ #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) -#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) #define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) @@ -750,28 +749,6 @@ #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif -/* id_aa64mmfr1 */ -#define ID_AA64MMFR1_EL1_ECBHB_SHIFT 60 -#define ID_AA64MMFR1_EL1_TIDCP1_SHIFT 52 -#define ID_AA64MMFR1_EL1_HCX_SHIFT 40 -#define ID_AA64MMFR1_EL1_AFP_SHIFT 44 -#define ID_AA64MMFR1_EL1_ETS_SHIFT 36 -#define ID_AA64MMFR1_EL1_TWED_SHIFT 32 -#define ID_AA64MMFR1_EL1_XNX_SHIFT 28 -#define ID_AA64MMFR1_EL1_SpecSEI_SHIFT 24 -#define ID_AA64MMFR1_EL1_PAN_SHIFT 20 -#define ID_AA64MMFR1_EL1_LO_SHIFT 16 -#define ID_AA64MMFR1_EL1_HPDS_SHIFT 12 -#define ID_AA64MMFR1_EL1_VH_SHIFT 8 -#define ID_AA64MMFR1_EL1_VMIDBits_SHIFT 4 -#define ID_AA64MMFR1_EL1_HAFDBS_SHIFT 0 - -#define ID_AA64MMFR1_EL1_VMIDBits_8 0 -#define ID_AA64MMFR1_EL1_VMIDBits_16 2 - -#define ID_AA64MMFR1_EL1_TIDCP1_NI 0 -#define ID_AA64MMFR1_EL1_TIDCP1_IMP 1 - /* id_aa64mmfr2 */ #define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 #define ID_AA64MMFR2_EL1_EVT_SHIFT 56 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index c1d800c0d4d5..f56758d7dc04 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -388,6 +388,77 @@ Enum 3:0 PARANGE EndEnum EndSysreg +Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1 +Enum 63:60 ECBHB + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 59:56 CMOW + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 55:52 TIDCP1 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 51:48 nTLBPA + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 47:44 AFP + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 43:40 HCX + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 39:36 ETS + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 35:32 TWED + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 31:28 XNX + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 27:24 SpecSEI + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 23:20 PAN + 0b0000 NI + 0b0001 IMP + 0b0010 PAN2 + 0b0011 PAN3 +EndEnum +Enum 19:16 LO + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 15:12 HPDS + 0b0000 NI + 0b0001 IMP + 0b0010 HPDS2 +EndEnum +Enum 11:8 VH + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 7:4 VMIDBits + 0b0000 8 + 0b0010 16 +EndEnum +Enum 3:0 HAFDBS + 0b0000 NI + 0b0001 AF + 0b0010 DBM +EndEnum +EndSysreg + Sysreg SCTLR_EL1 3 0 1 0 0 Field 63 TIDCP Field 62 SPINMASK -- cgit 
v1.2.3 From cfa3a6c55b61a062afa1ccd8bca45fd270dd3d0f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:20 +0100 Subject: arm64/sysreg: Convert ID_AA64MMFR2_EL1 to automatic generation Convert ID_AA64MMFR2_EL1 defines to automatic generation as per DDI0487H.a, no functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-24-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 19 ------------ arch/arm64/tools/sysreg | 67 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 19 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5dcd8dff53b3..62c5c596b18f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -199,8 +199,6 @@ #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) -#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) - #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) @@ -749,23 +747,6 @@ #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif -/* id_aa64mmfr2 */ -#define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EL1_EVT_SHIFT 56 -#define ID_AA64MMFR2_EL1_BBM_SHIFT 52 -#define ID_AA64MMFR2_EL1_TTL_SHIFT 48 -#define ID_AA64MMFR2_EL1_FWB_SHIFT 40 -#define ID_AA64MMFR2_EL1_IDS_SHIFT 36 -#define ID_AA64MMFR2_EL1_AT_SHIFT 32 -#define ID_AA64MMFR2_EL1_ST_SHIFT 28 -#define ID_AA64MMFR2_EL1_NV_SHIFT 24 -#define ID_AA64MMFR2_EL1_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_EL1_VARange_SHIFT 16 -#define ID_AA64MMFR2_EL1_IESB_SHIFT 12 -#define ID_AA64MMFR2_EL1_LSM_SHIFT 8 -#define ID_AA64MMFR2_EL1_UAO_SHIFT 4 -#define ID_AA64MMFR2_EL1_CnP_SHIFT 0 - /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 #define ID_AA64DFR0_TRBE_SHIFT 44 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index f56758d7dc04..711e8055397b 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -459,6 +459,73 @@ Enum 3:0 HAFDBS EndEnum EndSysreg +Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2 +Enum 63:60 E0PD + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 59:56 EVT + 0b0000 NI + 0b0001 IMP + 0b0010 TTLBxS +EndEnum +Enum 55:52 BBM + 0b0000 0 + 0b0001 1 + 0b0010 2 +EndEnum +Enum 51:48 TTL + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 47:44 +Enum 43:40 FWB + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 39:36 IDS + 0b0000 0x0 + 0b0001 0x18 +EndEnum +Enum 35:32 AT + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 31:28 ST + 0b0000 39 + 0b0001 48_47 +EndEnum +Enum 27:24 NV + 0b0000 NI + 0b0001 IMP + 0b0010 NV2 +EndEnum +Enum 23:20 CCIDX + 0b0000 32 + 0b0001 64 +EndEnum +Enum 19:16 VARange + 0b0000 48 + 0b0001 52 +EndEnum +Enum 15:12 IESB + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 11:8 LSM + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 7:4 UAO + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 3:0 CnP + 0b0000 NI + 0b0001 IMP +EndEnum +EndSysreg + Sysreg SCTLR_EL1 3 0 1 0 0 Field 63 TIDCP Field 62 SPINMASK -- cgit v1.2.3 From cea08f2bf406416c9f54366e1328f2ce329bf4fd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:21 +0100 Subject: arm64/sysreg: Convert ID_AA64PFR0_EL1 to automatic generation Automatically generate the constants for ID_AA64PFR0_EL1 as per DDI0487I.a, no functional changes. The generic defines for the ELx fields are left in place as they remain useful. 
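One subtlety worth spelling out (a sketch, not part of the patch): FP and AdvSIMD are signed fields where 0b1111 means not-implemented, so call sites extract them as signed values; the wrapper is hypothetical, cpuid_feature_extract_signed_field() is the existing <asm/cpufeature.h> helper:

/* Sketch only: 0b1111 (NI) must compare as -1, hence the signed extract. */
static bool example_cpu_has_fp16(u64 pfr0)
{
	int val = cpuid_feature_extract_signed_field(pfr0,
						     ID_AA64PFR0_EL1_FP_SHIFT);

	return val >= 1;	/* 0b0000 is base FP, 0b0001 adds FP16 */
}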
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-25-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 24 ------------- arch/arm64/tools/sysreg | 76 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 24 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 62c5c596b18f..2f032ea7e7e8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -190,7 +190,6 @@ #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) -#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) #define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) @@ -681,29 +680,6 @@ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) /* id_aa64pfr0 */ -#define ID_AA64PFR0_EL1_CSV3_SHIFT 60 -#define ID_AA64PFR0_EL1_CSV2_SHIFT 56 -#define ID_AA64PFR0_EL1_DIT_SHIFT 48 -#define ID_AA64PFR0_EL1_AMU_SHIFT 44 -#define ID_AA64PFR0_EL1_MPAM_SHIFT 40 -#define ID_AA64PFR0_EL1_SEL2_SHIFT 36 -#define ID_AA64PFR0_EL1_SVE_SHIFT 32 -#define ID_AA64PFR0_EL1_RAS_SHIFT 28 -#define ID_AA64PFR0_EL1_GIC_SHIFT 24 -#define ID_AA64PFR0_EL1_AdvSIMD_SHIFT 20 -#define ID_AA64PFR0_EL1_FP_SHIFT 16 -#define ID_AA64PFR0_EL1_EL3_SHIFT 12 -#define ID_AA64PFR0_EL1_EL2_SHIFT 8 -#define ID_AA64PFR0_EL1_EL1_SHIFT 4 -#define ID_AA64PFR0_EL1_EL0_SHIFT 0 - -#define ID_AA64PFR0_EL1_AMU_IMP 0x1 -#define ID_AA64PFR0_EL1_SVE_IMP 0x1 -#define ID_AA64PFR0_EL1_RAS_IMP 0x1 -#define ID_AA64PFR0_EL1_RAS_V1P1 0x2 -#define ID_AA64PFR0_EL1_FP_NI 0xf -#define ID_AA64PFR0_EL1_FP_IMP 0x0 -#define ID_AA64PFR0_EL1_AdvSIMD_NI 0xf #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 711e8055397b..e0b990369a4a 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -46,6 +46,82 @@ # feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration # item ACCDATA) though it may be more taseful to do something else. +Sysreg ID_AA64PFR0_EL1 3 0 0 4 0 +Enum 63:60 CSV3 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 59:56 CSV2 + 0b0000 NI + 0b0001 IMP + 0b0010 CSV2_2 + 0b0011 CSV2_3 +EndEnum +Enum 55:52 RME + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 51:48 DIT + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 47:44 AMU + 0b0000 NI + 0b0001 IMP + 0b0010 V1P1 +EndEnum +Enum 43:40 MPAM + 0b0000 0 + 0b0001 1 +EndEnum +Enum 39:36 SEL2 + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 35:32 SVE + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 31:28 RAS + 0b0000 NI + 0b0001 IMP + 0b0010 V1P1 +EndEnum +Enum 27:24 GIC + 0b0000 NI + 0b0001 IMP + 0b0010 V4P1 +EndEnum +Enum 23:20 AdvSIMD + 0b0000 IMP + 0b0001 FP16 + 0b1111 NI +EndEnum +Enum 19:16 FP + 0b0000 IMP + 0b0001 FP16 + 0b1111 NI +EndEnum +Enum 15:12 EL3 + 0b0000 NI + 0b0001 IMP + 0b0010 AARCH32 +EndEnum +Enum 11:8 EL2 + 0b0000 NI + 0b0001 IMP + 0b0010 AARCH32 +EndEnum +Enum 7:4 EL1 + 0b0001 IMP + 0b0010 AARCH32 +EndEnum +Enum 3:0 EL0 + 0b0001 IMP + 0b0010 AARCH32 +EndEnum +EndSysreg + Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Res0 63:60 Enum 59:56 F64MM -- cgit v1.2.3 From ef4ba5a635bfbd98c0893430ddfc9baf9fbccee6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:22 +0100 Subject: arm64/sysreg: Convert ID_AA64PFR1_EL1 to automatic generation Convert ID_AA64PFR1_EL1 to be automatically generated as per DDI0487H.a, no functional changes.
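One thing the generated names make easier to read is the fractional fields; a sketch (not part of the patch) of combining CSV2 with CSV2_frac, using names that follow the generator's <reg>_<field>_<value> pattern for the entries introduced below, plus the existing extraction helper:

/* Sketch only: a fractional field qualifies its base field. */
static bool example_csv2_at_least_1p2(u64 pfr0, u64 pfr1)
{
	u32 csv2 = cpuid_feature_extract_unsigned_field(pfr0,
						ID_AA64PFR0_EL1_CSV2_SHIFT);
	u32 frac = cpuid_feature_extract_unsigned_field(pfr1,
						ID_AA64PFR1_EL1_CSV2_frac_SHIFT);

	return csv2 >= ID_AA64PFR0_EL1_CSV2_CSV2_2 ||
	       (csv2 == ID_AA64PFR0_EL1_CSV2_IMP &&
		frac >= ID_AA64PFR1_EL1_CSV2_frac_CSV2_1p2);
}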
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-26-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 21 ------------------- arch/arm64/tools/sysreg | 45 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 21 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 2f032ea7e7e8..e78d9dc1024d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -190,8 +190,6 @@ #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) -#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) - #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) @@ -683,25 +681,6 @@ #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 -/* id_aa64pfr1 */ -#define ID_AA64PFR1_EL1_SME_SHIFT 24 -#define ID_AA64PFR1_EL1_MPAM_frac_SHIFT 16 -#define ID_AA64PFR1_EL1_RAS_frac_SHIFT 12 -#define ID_AA64PFR1_EL1_MTE_SHIFT 8 -#define ID_AA64PFR1_EL1_SSBS_SHIFT 4 -#define ID_AA64PFR1_EL1_BT_SHIFT 0 - -#define ID_AA64PFR1_EL1_SSBS_NI 0 -#define ID_AA64PFR1_EL1_SSBS_IMP 1 -#define ID_AA64PFR1_EL1_SSBS_SSBS2 2 -#define ID_AA64PFR1_EL1_BT_IMP 0x1 -#define ID_AA64PFR1_EL1_SME_IMP 1 -#define ID_AA64PFR1_EL1_MTE_NI 0x0 -#define ID_AA64PFR1_EL1_MTE_IMP 0x1 -#define ID_AA64PFR1_EL1_MTE_MTE2 0x2 -#define ID_AA64PFR1_EL1_MTE_MTE3 0x3 - /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index e0b990369a4a..ca821f9279aa 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -122,6 +122,51 @@ Enum 3:0 EL0 EndEnum EndSysreg +Sysreg ID_AA64PFR1_EL1 3 0 0 4 1 +Res0 63:40 +Enum 39:36 NMI + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 35:32 CSV2_frac + 0b0000 NI + 0b0001 CSV2_1p1 + 0b0010 CSV2_1p2 +EndEnum +Enum 31:28 RNDR_trap + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 27:24 SME + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 23:20 +Enum 19:16 MPAM_frac + 0b0000 MINOR_0 + 0b0001 MINOR_1 +EndEnum +Enum 15:12 RAS_frac + 0b0000 NI + 0b0001 RASv1p1 +EndEnum +Enum 11:8 MTE + 0b0000 NI + 0b0001 IMP + 0b0010 MTE2 + 0b0011 MTE3 +EndEnum +Enum 7:4 SSBS + 0b0000 NI + 0b0001 IMP + 0b0010 SSBS2 +EndEnum +Enum 3:0 BT + 0b0000 NI + 0b0001 IMP +EndEnum +EndSysreg + Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Res0 63:60 Enum 59:56 F64MM -- cgit v1.2.3 From 0a45f3980db0446febd21ae6dff475060fd39a39 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:23 +0100 Subject: arm64/sysreg: Convert TPIDR_EL1 to automatic generation Convert TPIDR_EL1 to automatic generation as per DDI0487H.a, no functional changes.
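For orientation (the conversion only moves where the definition lives): TPIDR_EL1 is where the kernel keeps the per-CPU offset on non-VHE configurations, so accesses like the sketch below keep working against the generated definition; the wrapper function is hypothetical:

/* Sketch only: see __my_cpu_offset in <asm/percpu.h> for the real user. */
static unsigned long example_read_tpidr_el1(void)
{
	return read_sysreg(tpidr_el1);
}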
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-27-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e78d9dc1024d..61a24737d517 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -429,8 +429,6 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) -#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4) - #define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index ca821f9279aa..644ad47e28e5 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -774,6 +774,10 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1 Fields CONTEXTIDR_ELx EndSysreg +Sysreg TPIDR_EL1 3 0 13 0 4 +Field 63:0 ThreadID +EndSysreg + Sysreg CLIDR_EL1 3 1 0 0 1 Res0 63:47 Field 46:33 Ttypen -- cgit v1.2.3 From b1179b75e9a83cfa80accb402dd15ffa6b352080 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:24 +0100 Subject: arm64/sysreg: Convert SCXTNUM_EL1 to automatic generation Convert SCXTNUM_EL1 to automatic generation as per DDI0487H.a, no functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-28-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 61a24737d517..c7876363c6e5 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -429,8 +429,6 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) -#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) - #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 644ad47e28e5..6326dca99f3c 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -778,6 +778,10 @@ Sysreg TPIDR_EL1 3 0 13 0 4 Field 63:0 ThreadID EndSysreg +Sysreg SCXTNUM_EL1 3 0 13 0 7 +Field 63:0 SoftwareContextNumber +EndSysreg + Sysreg CLIDR_EL1 3 1 0 0 1 Res0 63:47 Field 46:33 Ttypen -- cgit v1.2.3 From 3e9ae1ce508b8d69762abd1b8b9d9f97d6715b9b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:25 +0100 Subject: arm64/sysreg: Add definition for ALLINT The FEAT_NMI extension adds a new system register ALLINT for controlling NMI-related interrupt masking; add a definition of this register as per DDI0487H.a.
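Since nothing consumes the register yet, here is a sketch of how a future FEAT_NMI user might look; both SYS_ALLINT and the ALLINT_ALLINT field name are what the generator is expected to emit from the entry below, and are assumptions of this example:

/* Sketch only: setting ALLINT.ALLINT masks interrupts, including NMIs. */
static void example_mask_all_interrupts(void)
{
	write_sysreg_s(ALLINT_ALLINT, SYS_ALLINT);
}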
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-29-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 6326dca99f3c..5bccf7712ec3 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -761,6 +761,12 @@ Sysreg SMCR_EL1 3 0 1 2 6 Fields SMCR_ELx EndSysreg +Sysreg ALLINT 3 0 4 3 0 +Res0 63:14 +Field 13 ALLINT +Res0 12:0 +EndSysreg + Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg -- cgit v1.2.3 From b3adc3844e1dedd05fa8d09633eaa8ddc5ddcece Mon Sep 17 00:00:00 2001 From: Joey Gouly Date: Tue, 30 Aug 2022 11:48:31 +0100 Subject: arm64: module: move find_section to header Move it to the header so that the implementation can be shared by the alternatives code. Signed-off-by: Joey Gouly Cc: Will Deacon Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220830104833.34636-2-joey.gouly@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/module.h | 15 +++++++++++++++ arch/arm64/kernel/module.c | 15 --------------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 4e7fa2623896..22f1be47cc92 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -65,4 +65,19 @@ static inline bool plt_entry_is_initialized(const struct plt_entry *e) return e->adrp || e->add || e->br; } +static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + } + + return NULL; +} + #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f2d4bb14bfab..76b41e4ca9fa 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -476,21 +476,6 @@ overflow: return -ENOEXEC; } -static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - const char *name) -{ - const Elf_Shdr *s, *se; - const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - - for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { - if (strcmp(name, secstrs + s->sh_name) == 0) - return s; - } - - return NULL; -} - static inline void __init_plt(struct plt_entry *plt, unsigned long addr) { *plt = get_plt_entry(addr, plt); -- cgit v1.2.3 From 4e3bca8f7cdd3b658ee7ad700fdce95b5e13a441 Mon Sep 17 00:00:00 2001 From: Joey Gouly Date: Tue, 30 Aug 2022 11:48:32 +0100 Subject: arm64: alternative: patch alternatives in the vDSO Make it possible to use alternatives in the vDSO, so that better implementations can be used if possible. 
Signed-off-by: Joey Gouly Cc: Will Deacon Cc: Vincenzo Frascino Cc: Mark Rutland Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220830104833.34636-3-joey.gouly@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/vdso.h | 3 +++ arch/arm64/kernel/alternative.c | 28 ++++++++++++++++++++++++++++ arch/arm64/kernel/vdso.c | 3 --- arch/arm64/kernel/vdso/vdso.lds.S | 7 +++++++ 4 files changed, 38 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index f99dcb94b438..b4ae32109932 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -26,6 +26,9 @@ (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \ }) +extern char vdso_start[], vdso_end[]; +extern char vdso32_start[], vdso32_end[]; + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_H */ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 9bcaa5eacf16..a97775963f35 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -10,11 +10,14 @@ #include #include +#include #include #include #include #include +#include #include +#include #include #define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f) @@ -192,6 +195,30 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu } } +void apply_alternatives_vdso(void) +{ + struct alt_region region; + const struct elf64_hdr *hdr; + const struct elf64_shdr *shdr; + const struct elf64_shdr *alt; + DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + + bitmap_fill(all_capabilities, ARM64_NPATCHABLE); + + hdr = (struct elf64_hdr *)vdso_start; + shdr = (void *)hdr + hdr->e_shoff; + alt = find_section(hdr, shdr, ".altinstructions"); + if (!alt) + return; + + region = (struct alt_region){ + .begin = (void *)hdr + alt->sh_offset, + .end = (void *)hdr + alt->sh_offset + alt->sh_size, + }; + + __apply_alternatives(®ion, false, &all_capabilities[0]); +} + /* * We might be patching the stop_machine state machine, so implement a * really simple polling protocol here. @@ -225,6 +252,7 @@ static int __apply_alternatives_multi_stop(void *unused) void __init apply_alternatives_all(void) { + apply_alternatives_vdso(); /* better not try code patching on a live SMP system */ stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index a61fc4f989b3..ac93a2ee9c07 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -29,9 +29,6 @@ #include #include -extern char vdso_start[], vdso_end[]; -extern char vdso32_start[], vdso32_end[]; - enum vdso_abi { VDSO_ABI_AA64, VDSO_ABI_AA32, diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index e69fb4aaaf3e..6028f1fe2d1c 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -48,6 +48,13 @@ SECTIONS PROVIDE (_etext = .); PROVIDE (etext = .); + . = ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .dynamic : { *(.dynamic) } :text :dynamic .rela.dyn : ALIGN(8) { *(.rela .rela*) } -- cgit v1.2.3 From 9025cebf12d1763de36d5e09e2b0a1e4f9b13b28 Mon Sep 17 00:00:00 2001 From: Joey Gouly Date: Tue, 30 Aug 2022 11:48:33 +0100 Subject: arm64: vdso: use SYS_CNTVCTSS_EL0 for gettimeofday If FEAT_ECV is implemented, the self-synchronized counter CNTVCTSS_EL0 can be used, removing the need for an ISB. 
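Callers need no change for this; an ordinary vDSO consumer such as the sketch below (not part of the patch) simply gets the ISB-free counter read on FEAT_ECV parts:

#include <time.h>

/* Sketch only: clock_gettime() here resolves to the vDSO fast path. */
static long long example_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}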
Signed-off-by: Joey Gouly Cc: Will Deacon Cc: Vincenzo Frascino Cc: Mark Rutland Cc: Andre Przywara Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220830104833.34636-4-joey.gouly@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/vdso/gettimeofday.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index 4f7a629df81f..764d13e2916c 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -7,8 +7,10 @@ #ifndef __ASSEMBLY__ +#include #include #include +#include #define VDSO_HAS_CLOCK_GETRES 1 @@ -78,11 +80,20 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, return 0; /* - * This isb() is required to prevent that the counter value + * If FEAT_ECV is available, use the self-synchronizing counter. + * Otherwise the isb is required to prevent that the counter value * is speculated. - */ - isb(); - asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory"); + */ + asm volatile( + ALTERNATIVE("isb\n" + "mrs %0, cntvct_el0", + "nop\n" + __mrs_s("%0", SYS_CNTVCTSS_EL0), + ARM64_HAS_ECV) + : "=r" (res) + : + : "memory"); + arch_counter_enforce_ordering(res); return res; -- cgit v1.2.3 From 16283c54a6c79b589001763421738db96de3ae4e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:38 +0100 Subject: arm64: stacktrace: fix kerneldoc comments Many of the comment blocks in the arm64 stacktrace code are *almost* kerneldoc, but not quite. Convert them to kerneldoc, as was presumably originally intended. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Kalesh Singh Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 28 +++++++++++++++++----------- arch/arm64/include/asm/stacktrace/nvhe.h | 4 ++-- 2 files changed, 19 insertions(+), 13 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index f58eb944c46f..b8b20ef5aa5d 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -38,9 +38,8 @@ struct stack_info { enum stack_type type; }; -/* - * A snapshot of a frame record or fp/lr register values, along with some - * accounting information necessary for robust unwinding. +/** + * struct unwind_state - state used for robust unwinding. * * @fp: The fp value in the frame record (or the real fp) * @pc: The lr value in the frame record (or the real lr) @@ -113,27 +112,34 @@ static inline void unwind_init_common(struct unwind_state *state, state->prev_type = STACK_TYPE_UNKNOWN; } -/* - * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to - * a kernel address. +/** + * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame + * pointer to a kernel address. * * @fp: the frame pointer to be updated to its kernel address. * @type: the stack type associated with frame pointer @fp * - * Returns true and success and @fp is updated to the corresponding - * kernel virtual address; otherwise returns false. + * Return: true if the VA can be translated, false otherwise. 
+ * + * Upon success @fp is updated to the corresponding kernel virtual address. */ typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp, enum stack_type type); -/* - * on_accessible_stack_fn() - Check whether a stack range is on any - * of the possible stacks. +/** + * typedef on_accessible_stack_fn() - Check whether a stack range is on any of + * the possible stacks. * * @tsk: task whose stack is being unwound * @sp: stack address being checked * @size: size of the stack range being checked * @info: stack unwinding context + * + * Return: true if the stack range is accessible, false otherwise. + * + * Upon success @info is updated with information for the relevant stack. + * + * Upon failure @info is updated with the UNKNOWN stack. */ typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, unsigned long sp, unsigned long size, diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h index d5527b600390..25ab83a315a7 100644 --- a/arch/arm64/include/asm/stacktrace/nvhe.h +++ b/arch/arm64/include/asm/stacktrace/nvhe.h @@ -20,8 +20,8 @@ #include -/* - * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc +/** + * kvm_nvhe_unwind_init() - Start an unwind from the given nVHE HYP fp and pc * * @state : unwind_state to initialize * @fp : frame pointer at which to start the unwinding. -- cgit v1.2.3 From bc8d75212d735ac9624c1d3532ad371ec9e570ae Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:39 +0100 Subject: arm64: stacktrace: simplify unwind_next_common() Currently unwind_next_common() takes a pointer to a stack_info which is only ever used within unwind_next_common(). Make it a local variable and simplify callers. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 12 ++++++------ arch/arm64/kernel/stacktrace.c | 3 +-- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 4 +--- arch/arm64/kvm/stacktrace.c | 4 +--- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index b8b20ef5aa5d..0fbae7c78b70 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -146,27 +146,27 @@ typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, struct stack_info *info); static inline int unwind_next_common(struct unwind_state *state, - struct stack_info *info, on_accessible_stack_fn accessible, stack_trace_translate_fp_fn translate_fp) { + struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; struct task_struct *tsk = state->task; if (fp & 0x7) return -EINVAL; - if (!accessible(tsk, fp, 16, info)) + if (!accessible(tsk, fp, 16, &info)) return -EINVAL; - if (test_bit(info->type, state->stacks_done)) + if (test_bit(info.type, state->stacks_done)) return -EINVAL; /* * If fp is not from the current address space perform the necessary * translation before dereferencing it to get the next fp. 
*/ - if (translate_fp && !translate_fp(&kern_fp, info->type)) + if (translate_fp && !translate_fp(&kern_fp, info.type)) return -EINVAL; /* @@ -183,7 +183,7 @@ static inline int unwind_next_common(struct unwind_state *state, * stack to another, it's never valid to unwind back to that first * stack. */ - if (info->type == state->prev_type) { + if (info.type == state->prev_type) { if (fp <= state->prev_fp) return -EINVAL; } else { @@ -197,7 +197,7 @@ static inline int unwind_next_common(struct unwind_state *state, state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); state->prev_fp = fp; - state->prev_type = info->type; + state->prev_type = info.type; return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index ce190ee18a20..056fb045d0e0 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -103,14 +103,13 @@ static int notrace unwind_next(struct unwind_state *state) { struct task_struct *tsk = state->task; unsigned long fp = state->fp; - struct stack_info info; int err; /* Final frame; nothing to unwind */ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_common(state, &info, on_accessible_stack, NULL); + err = unwind_next_common(state, on_accessible_stack, NULL); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 58f645ad66bc..50a1fa6b5c04 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -71,9 +71,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - struct stack_info info; - - return unwind_next_common(state, &info, on_accessible_stack, NULL); + return unwind_next_common(state, on_accessible_stack, NULL); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 949d19d603fb..b9cf551d9d31 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -97,9 +97,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - struct stack_info info; - - return unwind_next_common(state, &info, on_accessible_stack, + return unwind_next_common(state, on_accessible_stack, kvm_nvhe_stack_kern_va); } -- cgit v1.2.3 From b532ab5f23389e2141d8b586608f6435f83d5ecd Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:40 +0100 Subject: arm64: stacktrace: rename unwind_next_common() -> unwind_next_frame_record() The unwind_next_common() function unwinds a single frame record. There are other unwind steps (e.g. unwinding through trampolines) which are handled in the regular kernel unwinder, and in future there may be other common unwind helpers. Clarify the purpose of unwind_next_common() by renaming it to unwind_next_frame_record(). At the same time, add commentary, and delete the redundant comment at the top of asm/stacktrace/common.h. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. 
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 23 +++++++++++++---------- arch/arm64/kernel/stacktrace.c | 2 +- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 2 +- arch/arm64/kvm/stacktrace.c | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 0fbae7c78b70..a74fa301fe95 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -2,13 +2,6 @@ /* * Common arm64 stack unwinder code. * - * To implement a new arm64 stack unwinder: - * 1) Include this header - * - * 2) Call into unwind_next_common() from your top level unwind - * function, passing it the validation and translation callbacks - * (though the later can be NULL if no translation is required). - * * See: arch/arm64/kernel/stacktrace.c for the reference implementation. * * Copyright (C) 2012 ARM Ltd. @@ -145,9 +138,19 @@ typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info); -static inline int unwind_next_common(struct unwind_state *state, - on_accessible_stack_fn accessible, - stack_trace_translate_fp_fn translate_fp) +/** + * unwind_next_frame_record() - Unwind to the next frame record. + * + * @state: the current unwind state. + * @accessible: determines whether the frame record is accessible + * @translate_fp: translates the fp prior to access (may be NULL) + * + * Return: 0 upon success, an error code otherwise. + */ +static inline int +unwind_next_frame_record(struct unwind_state *state, + on_accessible_stack_fn accessible, + stack_trace_translate_fp_fn translate_fp) { struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 056fb045d0e0..4c8865e495fe 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -109,7 +109,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_common(state, on_accessible_stack, NULL); + err = unwind_next_frame_record(state, on_accessible_stack, NULL); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 50a1fa6b5c04..579b46aa9a55 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -71,7 +71,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - return unwind_next_common(state, on_accessible_stack, NULL); + return unwind_next_frame_record(state, on_accessible_stack, NULL); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index b9cf551d9d31..b69c18a26567 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -97,8 +97,8 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - return unwind_next_common(state, on_accessible_stack, - kvm_nvhe_stack_kern_va); + return unwind_next_frame_record(state, on_accessible_stack, + kvm_nvhe_stack_kern_va); } static void unwind(struct unwind_state *state, -- cgit 
v1.2.3 From 75758d511432c129db39b50dd3c108e65dd1a2b1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:41 +0100 Subject: arm64: stacktrace: move SDEI stack helpers to stacktrace code For clarity and ease of maintenance, it would be helpful for all the stack helpers to be in the same place. Move the SDEI stack helpers into the stacktrace code where all the other stack helpers live. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: James Morse Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sdei.h | 17 ---------------- arch/arm64/include/asm/stacktrace.h | 40 ++++++++++++++++++++++++++++++++++++- arch/arm64/kernel/sdei.c | 32 ----------------------------- arch/arm64/kernel/stacktrace.c | 11 ++++++++-- 4 files changed, 48 insertions(+), 52 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index 7bea1d705dd6..4292d9bafb9d 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -43,22 +43,5 @@ unsigned long do_sdei_event(struct pt_regs *regs, unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) -struct stack_info; - -bool _on_sdei_stack(unsigned long sp, unsigned long size, - struct stack_info *info); -static inline bool on_sdei_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - if (!IS_ENABLED(CONFIG_VMAP_STACK)) - return false; - if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) - return false; - if (in_nmi()) - return _on_sdei_stack(sp, size, info); - - return false; -} - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SDEI_H */ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 6ebdcdff77f5..fa2df1ea22eb 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -54,7 +54,45 @@ static inline bool on_overflow_stack(unsigned long sp, unsigned long size, } #else static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) { return false; } + struct stack_info *info) +{ + return false; +} +#endif + +#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK) +DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); +DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); + +static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); + unsigned long high = low + SDEI_STACK_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); +} + +static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); + unsigned long high = low + SDEI_STACK_SIZE; + + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); +} +#else +static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + return false; +} + +static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, + struct stack_info *info) +{ + return false; +} #endif #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/kernel/sdei.c 
b/arch/arm64/kernel/sdei.c index d20620a1c51a..d56e170e1ca7 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -162,38 +162,6 @@ static int init_sdei_scs(void) return err; } -static bool on_sdei_normal_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); - unsigned long high = low + SDEI_STACK_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); -} - -static bool on_sdei_critical_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); - unsigned long high = low + SDEI_STACK_SIZE; - - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); -} - -bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info) -{ - if (!IS_ENABLED(CONFIG_VMAP_STACK)) - return false; - - if (on_sdei_critical_stack(sp, size, info)) - return true; - - if (on_sdei_normal_stack(sp, size, info)) - return true; - - return false; -} - unsigned long sdei_arch_get_entry_point(int conduit) { /* diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 4c8865e495fe..edf9edca2055 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -86,8 +86,15 @@ static bool on_accessible_stack(const struct task_struct *tsk, return true; if (on_overflow_stack(sp, size, info)) return true; - if (on_sdei_stack(sp, size, info)) - return true; + + if (IS_ENABLED(CONFIG_VMAP_STACK) && + IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) && + in_nmi()) { + if (on_sdei_critical_stack(sp, size, info)) + return true; + if (on_sdei_normal_stack(sp, size, info)) + return true; + } return false; } -- cgit v1.2.3 From 36f9a8793c16da01dffe0718b66c884933b06b98 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:42 +0100 Subject: arm64: stacktrace: add stackinfo_on_stack() helper Factor the core predicate out of on_stack() into a helper which can be used on a pre-populated stack_info. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. 
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index a74fa301fe95..81c21378b1ac 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -65,21 +65,34 @@ struct unwind_state { struct task_struct *task; }; +static inline bool stackinfo_on_stack(const struct stack_info *info, + unsigned long sp, unsigned long size) +{ + if (!info->low) + return false; + + if (sp < info->low || sp + size < sp || sp + size > info->high) + return false; + + return true; +} + static inline bool on_stack(unsigned long sp, unsigned long size, unsigned long low, unsigned long high, enum stack_type type, struct stack_info *info) { - if (!low) - return false; + struct stack_info tmp = { + .low = low, + .high = high, + .type = type, + }; - if (sp < low || sp + size < sp || sp + size > high) + if (!stackinfo_on_stack(&tmp, sp, size)) return false; - if (info) { - info->low = low; - info->high = high; - info->type = type; - } + if (info) + *info = tmp; + return true; } -- cgit v1.2.3 From d1f684e46bbd43eac5c6fb00906c57425d7022a6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:43 +0100 Subject: arm64: stacktrace: rework stack boundary discovery In subsequent patches we'll want to acquire the stack boundaries ahead-of-time, and we'll need to be able to acquire the relevant stack_info regardless of whether we have an object that happens to be on the stack. This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX() helpers, with the caller being responsible for checking whether an object is on a relevant stack. For the moment this is moved into the on_accessible_stack() functions, making these slightly larger; subsequent patches will remove the on_accessible_stack() functions and simplify the logic. The on_irq_stack() and on_task_stack() helpers are kept as these are used by IRQ entry sequences and stackleak respectively. As they're only used as predicates, the stack_info pointer parameter is removed in both cases. As the on_accessible_stack() functions are always passed a non-NULL info pointer, these now update info unconditionally. When updating the type to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but as these will not be consumed this should have no adverse effect. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T.
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/include/asm/stacktrace.h | 78 ++++++++++++++++++------------ arch/arm64/include/asm/stacktrace/common.h | 28 ++++------- arch/arm64/kernel/ptrace.c | 2 +- arch/arm64/kernel/stacktrace.c | 65 ++++++++++++++++--------- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 37 ++++++++++---- arch/arm64/kvm/stacktrace.c | 37 ++++++++++---- 7 files changed, 153 insertions(+), 96 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 86eb0bfe3b38..61883518fc50 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task); * The top of the current task's task stack */ #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE) -#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) +#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1)) #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index fa2df1ea22eb..aad0c6258721 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -22,77 +22,91 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); -static inline bool on_irq_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_irq(void) { unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long high = low + IRQ_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_IRQ, + }; } -static inline bool on_task_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) +static inline bool on_irq_stack(unsigned long sp, unsigned long size) +{ + struct stack_info info = stackinfo_get_irq(); + return stackinfo_on_stack(&info, sp, size); +} + +static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_TASK, + }; +} + +static inline bool on_task_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size) +{ + struct stack_info info = stackinfo_get_task(tsk); + return stackinfo_on_stack(&info, sp, size); } #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_overflow(void) { unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } #else -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct 
stack_info *info) -{ - return false; -} +#define stackinfo_get_overflow() stackinfo_get_unknown() #endif #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK) DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); -static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_sdei_normal(void) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_SDEI_NORMAL, + }; } -static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_sdei_critical(void) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_SDEI_CRITICAL, + }; } #else -static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - return false; -} - -static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - return false; -} +#define stackinfo_get_sdei_normal() stackinfo_get_unknown() +#define stackinfo_get_sdei_critical() stackinfo_get_unknown() #endif #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 81c21378b1ac..3c3bda34bb87 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -65,6 +65,15 @@ struct unwind_state { struct task_struct *task; }; +static inline struct stack_info stackinfo_get_unknown(void) +{ + return (struct stack_info) { + .low = 0, + .high = 0, + .type = STACK_TYPE_UNKNOWN, + }; +} + static inline bool stackinfo_on_stack(const struct stack_info *info, unsigned long sp, unsigned long size) { @@ -77,25 +86,6 @@ static inline bool stackinfo_on_stack(const struct stack_info *info, return true; } -static inline bool on_stack(unsigned long sp, unsigned long size, - unsigned long low, unsigned long high, - enum stack_type type, struct stack_info *info) -{ - struct stack_info tmp = { - .low = low, - .high = high, - .type = type, - }; - - if (!stackinfo_on_stack(&tmp, sp, size)) - return false; - - if (info) - *info = tmp; - - return true; -} - static inline void unwind_init_common(struct unwind_state *state, struct task_struct *task) { diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index eb7c08dfb834..00c05da7112c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || - on_irq_stack(addr, sizeof(unsigned long), NULL); + on_irq_stack(addr, sizeof(unsigned long)); } /** diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index edf9edca2055..ca56fd732c2a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -67,36 +67,55 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = 
thread_saved_pc(task); } -/* - * We can only safely access per-cpu stacks from current in a non-preemptible - * context. - */ static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - if (on_task_stack(tsk, sp, size, info)) - return true; - if (tsk != current || preemptible()) - return false; - if (on_irq_stack(sp, size, info)) - return true; - if (on_overflow_stack(sp, size, info)) - return true; - - if (IS_ENABLED(CONFIG_VMAP_STACK) && - IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) && - in_nmi()) { - if (on_sdei_critical_stack(sp, size, info)) - return true; - if (on_sdei_normal_stack(sp, size, info)) - return true; - } + tmp = stackinfo_get_task(tsk); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + /* + * We can only safely access per-cpu stacks when unwinding the current + * task in a non-preemptible context. + */ + if (tsk != current || preemptible()) + goto not_found; + + tmp = stackinfo_get_irq(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + /* + * We can only safely access SDEI stacks which unwinding the current + * task in an NMI context. + */ + if (!IS_ENABLED(CONFIG_VMAP_STACK) || + !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) || + !in_nmi()) + goto not_found; + + tmp = stackinfo_get_sdei_normal(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_sdei_critical(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + +not_found: + *info = stackinfo_get_unknown(); return false; + +found: + *info = tmp; + return true; } /* diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 579b46aa9a55..5da0d44f61b7 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -39,34 +39,51 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc) DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace); -static bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_overflow(void) { unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } -static bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); unsigned long high = params->stack_hyp_va; unsigned long low = high - PAGE_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; } static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - return (on_overflow_stack(sp, size, info) || - on_hyp_stack(sp, size, info)); + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_hyp(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + *info = stackinfo_get_unknown(); + return false; + +found: + *info = tmp; + return 
true; } static int unwind_next(struct unwind_state *state) diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index b69c18a26567..26927344a263 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -62,37 +62,54 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, return true; } -static bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_overflow(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } -static bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); unsigned long low = (unsigned long)stacktrace_info->stack_base; unsigned long high = low + PAGE_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; } static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - return (on_overflow_stack(sp, size, info) || - on_hyp_stack(sp, size, info)); + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_hyp(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + *info = stackinfo_get_unknown(); + return false; + +found: + *info = tmp; + return true; } static int unwind_next(struct unwind_state *state) -- cgit v1.2.3 From bd8abd68836b5c2b668afc4fb46d85d687779dec Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:44 +0100 Subject: arm64: stacktrace: remove stack type from fp translator In subsequent patches we'll remove the stack_type enum, and move the FP translation logic out of the raw FP unwind code. In preparation for doing so, this patch removes the type parameter from the FP translation callback, and modifies kvm_nvhe_stack_kern_va() to determine the relevant stack directly. So that kvm_nvhe_stack_kern_va() can use the stackinfo_*() helpers, these are moved earlier in the file, but are not modified in any way. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-8-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 6 +-- arch/arm64/kvm/stacktrace.c | 82 +++++++++++++++++------------- 2 files changed, 49 insertions(+), 39 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 3c3bda34bb87..2f972441f572 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -113,14 +113,12 @@ static inline void unwind_init_common(struct unwind_state *state, * pointer to a kernel address. 
* * @fp: the frame pointer to be updated to its kernel address. - * @type: the stack type associated with frame pointer @fp * * Return: true if the VA can be translated, false otherwise. * * Upon success @fp is updated to the corresponding kernel virtual address. */ -typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp, - enum stack_type type); +typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); /** * typedef on_accessible_stack_fn() - Check whether a stack range is on any of @@ -172,7 +170,7 @@ unwind_next_frame_record(struct unwind_state *state, * If fp is not from the current address space perform the necessary * translation before dereferencing it to get the next fp. */ - if (translate_fp && !translate_fp(&kern_fp, info.type)) + if (translate_fp && !translate_fp(&kern_fp)) return -EINVAL; /* diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 26927344a263..7658c5db47c1 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -21,6 +21,34 @@ #include +static struct stack_info stackinfo_get_overflow(void) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; +} + +static struct stack_info stackinfo_get_hyp(void) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->stack_base; + unsigned long high = low + PAGE_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; +} + /* * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs * @@ -34,27 +62,31 @@ * Returns true on success and updates @addr to its corresponding kernel VA; * otherwise returns false. 
*/ -static bool kvm_nvhe_stack_kern_va(unsigned long *addr, - enum stack_type type) +static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size) { struct kvm_nvhe_stacktrace_info *stacktrace_info; unsigned long hyp_base, kern_base, hyp_offset; + struct stack_info stack; stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - switch (type) { - case STACK_TYPE_HYP: + stack = stackinfo_get_hyp(); + if (stackinfo_on_stack(&stack, *addr, size)) { kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); hyp_base = (unsigned long)stacktrace_info->stack_base; - break; - case STACK_TYPE_OVERFLOW: + goto found; + } + + stack = stackinfo_get_overflow(); + if (stackinfo_on_stack(&stack, *addr, size)) { kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); hyp_base = (unsigned long)stacktrace_info->overflow_stack_base; - break; - default: - return false; + goto found; } + return false; + +found: hyp_offset = *addr - hyp_base; *addr = kern_base + hyp_offset; @@ -62,32 +94,12 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, return true; } -static struct stack_info stackinfo_get_overflow(void) -{ - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; - unsigned long high = low + OVERFLOW_STACK_SIZE; - - return (struct stack_info) { - .low = low, - .high = high, - .type = STACK_TYPE_OVERFLOW, - }; -} - -static struct stack_info stackinfo_get_hyp(void) +/* + * Convert a KVN nVHE HYP frame record address to a kernel VA + */ +static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) { - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->stack_base; - unsigned long high = low + PAGE_SIZE; - - return (struct stack_info) { - .low = low, - .high = high, - .type = STACK_TYPE_HYP, - }; + return kvm_nvhe_stack_kern_va(addr, 16); } static bool on_accessible_stack(const struct task_struct *tsk, @@ -115,7 +127,7 @@ found: static int unwind_next(struct unwind_state *state) { return unwind_next_frame_record(state, on_accessible_stack, - kvm_nvhe_stack_kern_va); + kvm_nvhe_stack_kern_record_va); } static void unwind(struct unwind_state *state, -- cgit v1.2.3 From 8df137300d1964c3810991aa2fe17a105348b647 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:45 +0100 Subject: arm64: stacktrace: track all stack boundaries explicitly Currently we call an on_accessible_stack() callback for each step of the unwinder, requiring redundant work to be performed in the core of the unwind loop (e.g. disabling preemption around accesses to per-cpu variables containing stack boundaries). To prevent unwind loops which go through a stack multiple times, we have to track the set of unwound stacks, requiring a stack_type enum which needs to cater for all the stacks of all possible callees. To prevent loops within a stack, we must track the prior FP values. This patch reworks the unwinder to minimize the work in the core of the unwinder, and to remove the need for the stack_type enum. The set of accessible stacks (and their boundaries) are determined at the start of the unwind, and the current stack is tracked during the unwind, with completed stacks removed from the set of accessible stacks. This makes the boundary checks more accurate (e.g. 
detecting overlapped frame records), and removes the need for separate tracking of the prior FP and visited stacks. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace.h | 5 - arch/arm64/include/asm/stacktrace/common.h | 164 +++++++++++++---------------- arch/arm64/kernel/stacktrace.c | 91 +++++++--------- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 35 ++---- arch/arm64/kvm/stacktrace.c | 36 ++----- 5 files changed, 132 insertions(+), 199 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index aad0c6258721..5a0edb064ea4 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -30,7 +30,6 @@ static inline struct stack_info stackinfo_get_irq(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_IRQ, }; } @@ -48,7 +47,6 @@ static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_TASK, }; } @@ -70,7 +68,6 @@ static inline struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } #else @@ -89,7 +86,6 @@ static inline struct stack_info stackinfo_get_sdei_normal(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_SDEI_NORMAL, }; } @@ -101,7 +97,6 @@ static inline struct stack_info stackinfo_get_sdei_critical(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_SDEI_CRITICAL, }; } #else diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 2f972441f572..638008f48597 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -9,26 +9,12 @@ #ifndef __ASM_STACKTRACE_COMMON_H #define __ASM_STACKTRACE_COMMON_H -#include -#include #include #include -enum stack_type { - STACK_TYPE_UNKNOWN, - STACK_TYPE_TASK, - STACK_TYPE_IRQ, - STACK_TYPE_OVERFLOW, - STACK_TYPE_SDEI_NORMAL, - STACK_TYPE_SDEI_CRITICAL, - STACK_TYPE_HYP, - __NR_STACK_TYPES -}; - struct stack_info { unsigned long low; unsigned long high; - enum stack_type type; }; /** @@ -37,32 +23,27 @@ struct stack_info { * @fp: The fp value in the frame record (or the real fp) * @pc: The lr value in the frame record (or the real lr) * - * @stacks_done: Stacks which have been entirely unwound, for which it is no - * longer valid to unwind to. - * - * @prev_fp: The fp that pointed to this frame record, or a synthetic value - * of 0. This is used to ensure that within a stack, each - * subsequent frame record is at an increasing address. - * @prev_type: The type of stack this frame record was on, or a synthetic - * value of STACK_TYPE_UNKNOWN. This is used to detect a - * transition from one stack to another. - * * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance * associated with the most recently encountered replacement lr * value. * * @task: The task being unwound. + * + * @stack: The stack currently being unwound. + * @stacks: An array of stacks which can be unwound. + * @nr_stacks: The number of stacks in @stacks. 
*/ struct unwind_state { unsigned long fp; unsigned long pc; - DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); - unsigned long prev_fp; - enum stack_type prev_type; #ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif struct task_struct *task; + + struct stack_info stack; + struct stack_info *stacks; + int nr_stacks; }; static inline struct stack_info stackinfo_get_unknown(void) @@ -70,7 +51,6 @@ static inline struct stack_info stackinfo_get_unknown(void) return (struct stack_info) { .low = 0, .high = 0, - .type = STACK_TYPE_UNKNOWN, }; } @@ -94,18 +74,7 @@ static inline void unwind_init_common(struct unwind_state *state, state->kr_cur = NULL; #endif - /* - * Prime the first unwind. - * - * In unwind_next() we'll check that the FP points to a valid stack, - * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be - * treated as a transition to whichever stack that happens to be. The - * prev_fp value won't be used, but we set it to 0 such that it is - * definitely not an accessible stack address. - */ - bitmap_zero(state->stacks_done, __NR_STACK_TYPES); - state->prev_fp = 0; - state->prev_type = STACK_TYPE_UNKNOWN; + state->stack = stackinfo_get_unknown(); } /** @@ -120,51 +89,94 @@ static inline void unwind_init_common(struct unwind_state *state, */ typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); +static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, + unsigned long sp, + unsigned long size) +{ + for (int i = 0; i < state->nr_stacks; i++) { + struct stack_info *info = &state->stacks[i]; + + if (stackinfo_on_stack(info, sp, size)) + return info; + } + + return NULL; +} + /** - * typedef on_accessible_stack_fn() - Check whether a stack range is on any of - * the possible stacks. - * - * @tsk: task whose stack is being unwound - * @sp: stack address being checked - * @size: size of the stack range being checked - * @info: stack unwinding context - * - * Return: true if the stack range is accessible, false otherwise. + * unwind_consume_stack() - Check if an object is on an accessible stack, + * updating stack boundaries so that future unwind steps cannot consume this + * object again. * - * Upon success @info is updated with information for the relevant stack. + * @state: the current unwind state. + * @sp: the base address of the object. + * @size: the size of the object. * - * Upon failure @info is updated with the UNKNOWN stack. + * Return: 0 upon success, an error code otherwise. */ -typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info); +static inline int unwind_consume_stack(struct unwind_state *state, + unsigned long sp, + unsigned long size) +{ + struct stack_info *next; + + if (stackinfo_on_stack(&state->stack, sp, size)) + goto found; + + next = unwind_find_next_stack(state, sp, size); + if (!next) + return -EINVAL; + + /* + * Stack transitions are strictly one-way, and once we've + * transitioned from one stack to another, it's never valid to + * unwind back to the old stack. + * + * Remove the current stack from the list of stacks so that it cannot + * be found on a subsequent transition. + * + * Note that stacks can nest in several valid orders, e.g. + * + * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL + * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW + * HYP -> OVERFLOW + * + * ... so we do not check the specific order of stack + * transitions. 
+ */ + state->stack = *next; + *next = stackinfo_get_unknown(); + +found: + /* + * Future unwind steps can only consume stack above this frame record. + * Update the current stack to start immediately above it. + */ + state->stack.low = sp + size; + return 0; +} /** * unwind_next_frame_record() - Unwind to the next frame record. * * @state: the current unwind state. - * @accessible: determines whether the frame record is accessible * @translate_fp: translates the fp prior to access (may be NULL) * * Return: 0 upon success, an error code otherwise. */ static inline int unwind_next_frame_record(struct unwind_state *state, - on_accessible_stack_fn accessible, stack_trace_translate_fp_fn translate_fp) { - struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; - struct task_struct *tsk = state->task; + int err; if (fp & 0x7) return -EINVAL; - if (!accessible(tsk, fp, 16, &info)) - return -EINVAL; - - if (test_bit(info.type, state->stacks_done)) - return -EINVAL; + err = unwind_consume_stack(state, fp, 16); + if (err) + return err; /* * If fp is not from the current address space perform the necessary @@ -174,34 +186,10 @@ unwind_next_frame_record(struct unwind_state *state, return -EINVAL; /* - * As stacks grow downward, any valid record on the same stack must be - * at a strictly higher address than the prior record. - * - * Stacks can nest in several valid orders, e.g. - * - * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL - * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW - * HYP -> OVERFLOW - * - * ... but the nesting itself is strict. Once we transition from one - * stack to another, it's never valid to unwind back to that first - * stack. - */ - if (info.type == state->prev_type) { - if (fp <= state->prev_fp) - return -EINVAL; - } else { - __set_bit(state->prev_type, state->stacks_done); - } - - /* - * Record this frame record's values and location. The prev_fp and - * prev_type are only meaningful to the next unwind_next() invocation. + * Record this frame record's values. */ state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); - state->prev_fp = fp; - state->prev_type = info.type; return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index ca56fd732c2a..9c8820f24262 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -67,57 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = thread_saved_pc(task); } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_task(tsk); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - /* - * We can only safely access per-cpu stacks when unwinding the current - * task in a non-preemptible context. - */ - if (tsk != current || preemptible()) - goto not_found; - - tmp = stackinfo_get_irq(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - /* - * We can only safely access SDEI stacks which unwinding the current - * task in an NMI context. 
- */ - if (!IS_ENABLED(CONFIG_VMAP_STACK) || - !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) || - !in_nmi()) - goto not_found; - - tmp = stackinfo_get_sdei_normal(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_sdei_critical(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - -not_found: - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - /* * Unwind from one frame record (A) to the next frame record (B). * @@ -135,7 +84,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_frame_record(state, on_accessible_stack, NULL); + err = unwind_next_frame_record(state, NULL); if (err) return err; @@ -215,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) barrier(); } +/* + * Per-cpu stacks are only accessible when unwinding the current task in a + * non-preemptible context. + */ +#define STACKINFO_CPU(name) \ + ({ \ + ((task == current) && !preemptible()) \ + ? stackinfo_get_##name() \ + : stackinfo_get_unknown(); \ + }) + +/* + * SDEI stacks are only accessible when unwinding the current task in an NMI + * context. + */ +#define STACKINFO_SDEI(name) \ + ({ \ + ((task == current) && in_nmi()) \ + ? stackinfo_get_sdei_##name() \ + : stackinfo_get_unknown(); \ + }) + noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_task(task), + STACKINFO_CPU(irq), +#if defined(CONFIG_VMAP_STACK) + STACKINFO_CPU(overflow), +#endif +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE) + STACKINFO_SDEI(normal), + STACKINFO_SDEI(critical), +#endif + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; if (regs) { if (task != current) diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 5da0d44f61b7..08e1325ead73 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -47,7 +47,6 @@ static struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } @@ -60,35 +59,12 @@ static struct stack_info stackinfo_get_hyp(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_HYP, }; } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_hyp(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, on_accessible_stack, NULL); + return unwind_next_frame_record(state, NULL); } static void notrace unwind(struct unwind_state *state, @@ -144,7 +120,14 @@ static bool pkvm_save_backtrace_entry(void *arg, unsigned long where) */ static void pkvm_save_backtrace(unsigned long fp, unsigned long pc) { - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_overflow(), + stackinfo_get_hyp(), + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; int 
idx = 0; kvm_nvhe_unwind_init(&state, fp, pc); diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 7658c5db47c1..0b4703945780 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -31,7 +31,6 @@ static struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } @@ -45,7 +44,6 @@ static struct stack_info stackinfo_get_hyp(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_HYP, }; } @@ -102,32 +100,9 @@ static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) return kvm_nvhe_stack_kern_va(addr, 16); } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_hyp(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, on_accessible_stack, - kvm_nvhe_stack_kern_record_va); + return unwind_next_frame_record(state, kvm_nvhe_stack_kern_record_va); } static void unwind(struct unwind_state *state, @@ -185,7 +160,14 @@ static void kvm_nvhe_dump_backtrace_end(void) static void hyp_dump_backtrace(unsigned long hyp_offset) { struct kvm_nvhe_stacktrace_info *stacktrace_info; - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_overflow(), + stackinfo_get_hyp(), + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); -- cgit v1.2.3 From 4b5e694e25ca2cbb5c97694a7d2a473f35099829 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:46 +0100 Subject: arm64: stacktrace: track hyp stacks in unwinder's address space Currently unwind_next_frame_record() has an optional callback to convert the address space of the FP. This is necessary for the NVHE unwinder, which tracks the stacks in the hyp VA space, but accesses the frame records in the kernel VA space. This is a bit unfortunate since it clutters unwind_next_frame_record(), which will get in the way of future rework. Instead, this patch changes the NVHE unwinder to track the stacks in the kernel's VA space and translate the FP prior to calling unwind_next_frame_record(). This removes the need for the translate_fp() callback, as all unwinders consistently track stacks in the native address space of the unwinder. At the same time, this patch consolidates the generation of the stack addresses behind the stackinfo_get_*() helpers. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T.
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-10-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 29 ++------------ arch/arm64/kernel/stacktrace.c | 2 +- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 2 +- arch/arm64/kvm/stacktrace.c | 62 +++++++++++++++++++----------- 4 files changed, 46 insertions(+), 49 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 638008f48597..508f734de46e 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -77,18 +77,6 @@ static inline void unwind_init_common(struct unwind_state *state, state->stack = stackinfo_get_unknown(); } -/** - * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame - * pointer to a kernel address. - * - * @fp: the frame pointer to be updated to its kernel address. - * - * Return: true if the VA can be translated, false otherwise. - * - * Upon success @fp is updated to the corresponding kernel virtual address. - */ -typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); - static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, unsigned long sp, unsigned long size) @@ -160,15 +148,13 @@ found: * unwind_next_frame_record() - Unwind to the next frame record. * * @state: the current unwind state. - * @translate_fp: translates the fp prior to access (may be NULL) * * Return: 0 upon success, an error code otherwise. */ static inline int -unwind_next_frame_record(struct unwind_state *state, - stack_trace_translate_fp_fn translate_fp) +unwind_next_frame_record(struct unwind_state *state) { - unsigned long fp = state->fp, kern_fp = fp; + unsigned long fp = state->fp; int err; if (fp & 0x7) @@ -178,18 +164,11 @@ unwind_next_frame_record(struct unwind_state *state, if (err) return err; - /* - * If fp is not from the current address space perform the necessary - * translation before dereferencing it to get the next fp. - */ - if (translate_fp && !translate_fp(&kern_fp)) - return -EINVAL; - /* * Record this frame record's values. 
*/ - state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); - state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); + state->fp = READ_ONCE(*(unsigned long *)(fp)); + state->pc = READ_ONCE(*(unsigned long *)(fp + 8)); return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 9c8820f24262..634279b3b03d 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -84,7 +84,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_frame_record(state, NULL); + err = unwind_next_frame_record(state); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 08e1325ead73..ed6b58b19cfa 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -64,7 +64,7 @@ static struct stack_info stackinfo_get_hyp(void) static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, NULL); + return unwind_next_frame_record(state); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 0b4703945780..3ace5b75813b 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -34,6 +34,17 @@ static struct stack_info stackinfo_get_overflow(void) }; } +static struct stack_info stackinfo_get_overflow_kern_va(void) +{ + unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} + static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info @@ -47,6 +58,17 @@ static struct stack_info stackinfo_get_hyp(void) }; } +static struct stack_info stackinfo_get_hyp_kern_va(void) +{ + unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); + unsigned long high = low + PAGE_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} + /* * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs * @@ -62,33 +84,22 @@ static struct stack_info stackinfo_get_hyp(void) */ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size) { - struct kvm_nvhe_stacktrace_info *stacktrace_info; - unsigned long hyp_base, kern_base, hyp_offset; - struct stack_info stack; + struct stack_info stack_hyp, stack_kern; - stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - - stack = stackinfo_get_hyp(); - if (stackinfo_on_stack(&stack, *addr, size)) { - kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); - hyp_base = (unsigned long)stacktrace_info->stack_base; + stack_hyp = stackinfo_get_hyp(); + stack_kern = stackinfo_get_hyp_kern_va(); + if (stackinfo_on_stack(&stack_hyp, *addr, size)) goto found; - } - stack = stackinfo_get_overflow(); - if (stackinfo_on_stack(&stack, *addr, size)) { - kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); - hyp_base = (unsigned long)stacktrace_info->overflow_stack_base; + stack_hyp = stackinfo_get_overflow(); + stack_kern = stackinfo_get_overflow_kern_va(); + if (stackinfo_on_stack(&stack_hyp, *addr, size)) goto found; - } return false; found: - hyp_offset = *addr - hyp_base; - - *addr = kern_base + hyp_offset; - + *addr = *addr - stack_hyp.low + stack_kern.low; return true; } @@ -102,7 +113,14 @@ static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) static 
int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, kvm_nvhe_stack_kern_record_va); + /* + * The FP is in the hypervisor VA space. Convert it to the kernel VA + * space so it can be unwound by the regular unwind functions. + */ + if (!kvm_nvhe_stack_kern_record_va(&state->fp)) + return -EINVAL; + + return unwind_next_frame_record(state); } static void unwind(struct unwind_state *state, @@ -185,7 +179,8 @@ static void hyp_dump_backtrace(unsigned long hyp_offset) { struct kvm_nvhe_stacktrace_info *stacktrace_info; struct stack_info stacks[] = { - stackinfo_get_overflow(), - stackinfo_get_hyp(), + stackinfo_get_overflow_kern_va(), + stackinfo_get_hyp_kern_va(), }; struct unwind_state state = { .stacks = stacks, -- cgit v1.2.3 From b2c3ccbd0011bb3b51d0fec24cb3a5812b1ec8ea Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 17 Aug 2022 16:59:13 +0100 Subject: arm64: atomics: remove LL/SC trampolines When CONFIG_ARM64_LSE_ATOMICS=y, each use of an LL/SC atomic results in a fragment of code being generated in a subsection without a clear association with its caller. A trampoline in the caller branches to the LL/SC atomic with a direct branch, and the atomic directly branches back into its trampoline. This breaks backtracing, as any PC within the out-of-line fragment will be symbolized as an offset from the nearest prior symbol (which may not be the function using the atomic), and since the atomic returns with a direct branch, the caller's PC may be missing from the backtrace. For example, with secondary_start_kernel() hacked to contain atomic_inc(NULL), the resulting exception can be reported as being taken from cpus_are_stuck_in_kernel(): | Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 | Mem abort info: | ESR = 0x0000000096000004 | EC = 0x25: DABT (current EL), IL = 32 bits | SET = 0, FnV = 0 | EA = 0, S1PTW = 0 | FSC = 0x04: level 0 translation fault | Data abort info: | ISV = 0, ISS = 0x00000004 | CM = 0, WnR = 0 | [0000000000000000] user address but active_mm is swapper | Internal error: Oops: 96000004 [#1] PREEMPT SMP | Modules linked in: | CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.19.0-11219-geb555cb5b794-dirty #3 | Hardware name: linux,dummy-virt (DT) | pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : cpus_are_stuck_in_kernel+0xa4/0x120 | lr : secondary_start_kernel+0x164/0x170 | sp : ffff80000a4cbe90 | x29: ffff80000a4cbe90 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: 0000000000000000 x22: 0000000000000000 x21: 0000000000000000 | x20: 0000000000000001 x19: 0000000000000001 x18: 0000000000000008 | x17: 3030383832343030 x16: 3030303030307830 x15: ffff80000a4cbab0 | x14: 0000000000000001 x13: 5d31666130663133 x12: 3478305b20313030 | x11: 3030303030303078 x10: 3020726f73736563 x9 : 726f737365636f72 | x8 : ffff800009ff2ef0 x7 : 0000000000000003 x6 : 0000000000000000 | x5 : 0000000000000000 x4 : 0000000000000000 x3 : 0000000000000100 | x2 : 0000000000000000 x1 : ffff0000029bd880 x0 : 0000000000000000 | Call trace: | cpus_are_stuck_in_kernel+0xa4/0x120 | __secondary_switched+0xb0/0xb4 | Code: 35ffffa3 17fffc6c d53cd040 f9800011 (885f7c01) | ---[ end trace 0000000000000000 ]--- This is confusing and hinders debugging, and will be problematic for CONFIG_LIVEPATCH as these cases cannot be unwound reliably.
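For illustration (an editorial sketch, not part of the original commit message), the __LL_SC_FALLBACK wrapper removed by the diff below emits code of roughly this shape, with the atomic body elided:

	b	3f		// trampoline in the caller
	.subsection 1		// fragments collected away from their callers
3:	<LL/SC atomic body>	// load/store-exclusive retry loop
	b	4f		// direct branch back
	.previous
4:				// execution resumes in the caller

Any PC inside the fragment at 3: falls outside the caller's symbol, which is why it is mis-symbolized as in the oops above.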
This is very similar to recent issues with out-of-line exception fixups, which were removed in commits: 35d67794b8828333 ("arm64: lib: __arch_clear_user(): fold fixups into body") 4012e0e22739eef9 ("arm64: lib: __arch_copy_from_user(): fold fixups into body") 139f9ab73d60cf76 ("arm64: lib: __arch_copy_to_user(): fold fixups into body") When the trampolines were introduced in commit: addfc38672c73efd ("arm64: atomics: avoid out-of-line ll/sc atomics") The rationale was to improve icache performance by grouping the LL/SC atomics together. This has never been measured, and this theoretical benefit is outweighed by other factors: * As the subsections are collapsed into sections at object file granularity, these are spread out throughout the kernel and can share cachelines with unrelated code regardless. * GCC 12.1.0 has been observed to place the trampoline out-of-line in specialised __ll_sc_*() functions, introducing more branching than was intended. * Removing the trampolines has been observed to shrink a defconfig kernel Image by 64KiB when building with GCC 12.1.0. This patch removes the LL/SC trampolines, meaning that the LL/SC atomics will be inlined into their callers (or placed in out-of-line functions using regular BL/RET pairs). When CONFIG_ARM64_LSE_ATOMICS=y, the LL/SC atomics are always called in an unlikely branch, and will be placed in a cold portion of the function, so this should have minimal impact on the hot paths. Other than the improved backtracing, there should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Will Deacon Link: https://lore.kernel.org/r/20220817155914.3975112-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/atomic_ll_sc.h | 40 ++++++++--------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index fe0db8d416fb..906e2d8c254c 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -12,19 +12,6 @@ #include -#ifdef CONFIG_ARM64_LSE_ATOMICS -#define __LL_SC_FALLBACK(asm_ops) \ -" b 3f\n" \ -" .subsection 1\n" \ -"3:\n" \ -asm_ops "\n" \ -" b 4f\n" \ -" .previous\n" \ -"4:\n" -#else -#define __LL_SC_FALLBACK(asm_ops) asm_ops -#endif - #ifndef CONFIG_CC_HAS_K_CONSTRAINT #define K #endif @@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ int result; \ \ asm volatile("// atomic_" #op "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " stxr %w1, %w0, %2\n" \ - " cbnz %w1, 1b\n") \ + " cbnz %w1, 1b\n" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ int result; \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " st" #rel "xr %w1, %w0, %2\n" \ " cbnz %w1, 1b\n" \ - " " #mb ) \ + " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ int val, result; \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %w0, %3\n" \ " " #asm_op " %w1, %w0, %w4\n" \ " st" #rel "xr %w2, %w1, %3\n" \ " cbnz %w2, 1b\n" \ - " " #mb ) \ + " " #mb \ : "=&r"
(result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " stxr %w1, %0, %2\n" \ - " cbnz %w1, 1b") \ + " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " st" #rel "xr %w1, %0, %2\n" \ " cbnz %w1, 1b\n" \ - " " #mb ) \ + " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -182,13 +164,12 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %0, %3\n" \ " " #asm_op " %1, %0, %4\n" \ " st" #rel "xr %w2, %1, %3\n" \ " cbnz %w2, 1b\n" \ - " " #mb ) \ + " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -240,7 +221,6 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" - __LL_SC_FALLBACK( " prfm pstl1strm, %2\n" "1: ldxr %0, %2\n" " subs %0, %0, #1\n" @@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" " dmb ish\n" - "2:") + "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); @@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ old = (u##sz)old; \ \ asm volatile( \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %[v]\n" \ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ @@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " cbnz %w[tmp], 1b\n" \ " " #mb "\n" \ - "2:") \ + "2:" \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \ @@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ unsigned long tmp, ret; \ \ asm volatile("// __cmpxchg_double" #name "\n" \ - __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxp %0, %1, %2\n" \ " eor %0, %0, %3\n" \ @@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ " st" #rel "xp %w0, %5, %6, %2\n" \ " cbnz %w0, 1b\n" \ " " #mb "\n" \ - "2:") \ + "2:" \ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : cl); \ -- cgit v1.2.3 From 78f6f5c994ed22a35ce1cd3ec9aeda8e2fa328e6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 17 Aug 2022 16:59:14 +0100 Subject: arm64: atomic: always inline the assembly The __lse_*() and __ll_sc_*() atomic implementations are marked as inline rather than __always_inline, permitting a compiler to generate out-of-line versions, which may be instrumented. We marked the atomic wrappers as __always_inline in commit: c35a824c31834d94 ("arm64: make atomic helpers __always_inline") ... but did not think to do the same for the underlying implementations. 
If the compiler were to out-of-line an LSE or LL/SC atomic, this could break noinstr code. Ensure this doesn't happen by marking the underlying implementations as __always_inline. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Will Deacon Link: https://lore.kernel.org/r/20220817155914.3975112-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/atomic_ll_sc.h | 18 +++++++------- arch/arm64/include/asm/atomic_lse.h | 46 ++++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 26 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 906e2d8c254c..0890e4f568fb 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -23,7 +23,7 @@ */ #define ATOMIC_OP(op, asm_op, constraint) \ -static inline void \ +static __always_inline void \ __ll_sc_atomic_##op(int i, atomic_t *v) \ { \ unsigned long tmp; \ @@ -40,7 +40,7 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ } #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ -static inline int \ +static __always_inline int \ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ @@ -61,7 +61,7 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ -static inline int \ +static __always_inline int \ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ @@ -119,7 +119,7 @@ ATOMIC_OPS(andnot, bic, ) #undef ATOMIC_OP #define ATOMIC64_OP(op, asm_op, constraint) \ -static inline void \ +static __always_inline void \ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ { \ s64 result; \ @@ -136,7 +136,7 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ } #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ -static inline long \ +static __always_inline long \ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ { \ s64 result; \ @@ -157,7 +157,7 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ } #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ -static inline long \ +static __always_inline long \ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ @@ -214,7 +214,7 @@ ATOMIC64_OPS(andnot, bic, ) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -static inline s64 +static __always_inline s64 __ll_sc_atomic64_dec_if_positive(atomic64_t *v) { s64 result; @@ -237,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) } #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ -static inline u##sz \ +static __always_inline u##sz \ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long old, \ u##sz new) \ @@ -295,7 +295,7 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE #define __CMPXCHG_DBL(name, mb, rel, cl) \ -static inline long \ +static __always_inline long \ __ll_sc__cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ unsigned long new1, \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 5d460f6b7675..52075e93de6c 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -11,7 +11,8 @@ #define __ASM_ATOMIC_LSE_H #define ATOMIC_OP(op, asm_op) \ -static inline void __lse_atomic_##op(int i, atomic_t *v) \ +static __always_inline void \ +__lse_atomic_##op(int i, atomic_t 
*v) \ { \ asm volatile( \ __LSE_PREAMBLE \ @@ -25,7 +26,7 @@ ATOMIC_OP(or, stset) ATOMIC_OP(xor, steor) ATOMIC_OP(add, stadd) -static inline void __lse_atomic_sub(int i, atomic_t *v) +static __always_inline void __lse_atomic_sub(int i, atomic_t *v) { __lse_atomic_add(-i, v); } @@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ +static __always_inline int \ +__lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ int old; \ \ @@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OPS #define ATOMIC_FETCH_OP_SUB(name) \ -static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ +static __always_inline int \ +__lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ return __lse_atomic_fetch_add##name(-i, v); \ } @@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( ) #undef ATOMIC_FETCH_OP_SUB #define ATOMIC_OP_ADD_SUB_RETURN(name) \ -static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ +static __always_inline int \ +__lse_atomic_add_return##name(int i, atomic_t *v) \ { \ return __lse_atomic_fetch_add##name(i, v) + i; \ } \ \ -static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ +static __always_inline int \ +__lse_atomic_sub_return##name(int i, atomic_t *v) \ { \ return __lse_atomic_fetch_sub(i, v) - i; \ } @@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( ) #undef ATOMIC_OP_ADD_SUB_RETURN -static inline void __lse_atomic_and(int i, atomic_t *v) +static __always_inline void __lse_atomic_and(int i, atomic_t *v) { return __lse_atomic_andnot(~i, v); } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ -static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ +static __always_inline int \ +__lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ return __lse_atomic_fetch_andnot##name(~i, v); \ } @@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND #define ATOMIC64_OP(op, asm_op) \ -static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ +static __always_inline void \ +__lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ @@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset) ATOMIC64_OP(xor, steor) ATOMIC64_OP(add, stadd) -static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) +static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { __lse_atomic64_add(-i, v); } @@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) 
\ -static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ +static __always_inline long \ +__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 old; \ \ @@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OPS #define ATOMIC64_FETCH_OP_SUB(name) \ -static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ +static __always_inline long \ +__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ return __lse_atomic64_fetch_add##name(-i, v); \ } @@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( ) #undef ATOMIC64_FETCH_OP_SUB #define ATOMIC64_OP_ADD_SUB_RETURN(name) \ -static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ +static __always_inline long \ +__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \ { \ return __lse_atomic64_fetch_add##name(i, v) + i; \ } \ \ -static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ +static __always_inline long \ +__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ { \ return __lse_atomic64_fetch_sub##name(i, v) - i; \ } @@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( ) #undef ATOMIC64_OP_ADD_SUB_RETURN -static inline void __lse_atomic64_and(s64 i, atomic64_t *v) +static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v) { return __lse_atomic64_andnot(~i, v); } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ -static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ +static __always_inline long \ +__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ return __lse_atomic64_fetch_andnot##name(~i, v); \ } @@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) +static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { unsigned long tmp; -- cgit v1.2.3 From b502c87d2a26c349acbc231ff2acd6f17147926b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Sep 2022 11:17:28 +0100 Subject: arm64: report EL1 UNDEFs better If an UNDEFINED exception is taken from EL1, and do_undefinstr() doesn't find any suitable undef_hook, it will call: BUG_ON(!user_mode(regs)) ... and the kernel will report a failure within do_undefinstr() rather than reporting the original context that the UNDEFINED exception was taken from. The pt_regs and ESR value reported within the BUG() handler will be from within do_undefinstr() and the code dump will be for the BRK in BUG_ON(), which isn't sufficient to debug the cause of the original exception. This patch makes the reporting better by having do_undefinstr() call die() directly in this case to report the original context from which the UNDEFINED exception was taken. Prior to this patch, an undefined instruction is reported as: | kernel BUG at arch/arm64/kernel/traps.c:497!
| Internal error: Oops - BUG: 0 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 0 Comm: swapper Not tainted 5.19.0-rc3-00127-geff044f1b04e-dirty #3 | Hardware name: linux,dummy-virt (DT) | pstate: 000000c5 (nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : do_undefinstr+0x28c/0x2ac | lr : do_undefinstr+0x298/0x2ac | sp : ffff800009f63bc0 | x29: ffff800009f63bc0 x28: ffff800009f73c00 x27: ffff800009644a70 | x26: ffff8000096778a8 x25: 0000000000000040 x24: 0000000000000000 | x23: 00000000800000c5 x22: ffff800009894060 x21: ffff800009f63d90 | x20: 0000000000000000 x19: ffff800009f63c40 x18: 0000000000000006 | x17: 0000000000403000 x16: 00000000bfbfd000 x15: ffff800009f63830 | x14: ffffffffffffffff x13: 0000000000000000 x12: 0000000000000019 | x11: 0101010101010101 x10: 0000000000161b98 x9 : 0000000000000000 | x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000000000 | x5 : ffff800009f761d0 x4 : 0000000000000000 x3 : ffff80000a2b80f8 | x2 : 0000000000000000 x1 : ffff800009f73c00 x0 : 00000000800000c5 | Call trace: | do_undefinstr+0x28c/0x2ac | el1_undef+0x2c/0x4c | el1h_64_sync_handler+0x84/0xd0 | el1h_64_sync+0x64/0x68 | setup_arch+0x550/0x598 | start_kernel+0x88/0x6ac | __primary_switched+0xb8/0xc0 | Code: 17ffff95 a9425bf5 17ffffb8 a9025bf5 (d4210000) With this patch applied, an undefined instruction is reported as: | Internal error: Oops - Undefined instruction: 0 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 0 Comm: swapper Not tainted 5.19.0-rc3-00128-gf27cfcc80e52-dirty #5 | Hardware name: linux,dummy-virt (DT) | pstate: 800000c5 (Nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : setup_arch+0x550/0x598 | lr : setup_arch+0x50c/0x598 | sp : ffff800009f63d90 | x29: ffff800009f63d90 x28: 0000000081000200 x27: ffff800009644a70 | x26: ffff8000096778c8 x25: 0000000000000040 x24: 0000000000000000 | x23: 0000000000000100 x22: ffff800009f69a58 x21: ffff80000a2b80b8 | x20: 0000000000000000 x19: 0000000000000000 x18: 0000000000000006 | x17: 0000000000403000 x16: 00000000bfbfd000 x15: ffff800009f63830 | x14: ffffffffffffffff x13: 0000000000000000 x12: 0000000000000019 | x11: 0101010101010101 x10: 0000000000161b98 x9 : 0000000000000000 | x8 : 0000000000000000 x7 : 0000000000000000 x6 : 0000000000000000 | x5 : 0000000000000008 x4 : 0000000000000010 x3 : 0000000000000000 | x2 : 0000000000000000 x1 : 0000000000000000 x0 : 0000000000000000 | Call trace: | setup_arch+0x550/0x598 | start_kernel+0x88/0x6ac | __primary_switched+0xb8/0xc0 | Code: b4000080 90ffed80 912ac000 97db745f (00000000) Signed-off-by: Mark Rutland Reviewed-by: Mark Brown Cc: Alexandru Elisei Cc: Amit Daniel Kachhap Cc: James Morse Cc: Will Deacon Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220913101732.3925290-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index b7fed33981f7..eac4f7a83175 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -494,7 +494,9 @@ void do_undefinstr(struct pt_regs *regs) if (call_undef_hook(regs) == 0) return; - BUG_ON(!user_mode(regs)); + if (!user_mode(regs)) + die("Oops - Undefined instruction", regs, 0); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } NOKPROBE_SYMBOL(do_undefinstr); -- cgit v1.2.3 From 18906ff9af6517c20763ed63dab602a4150794f7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Sep 2022 11:17:29 +0100 Subject: arm64: die(): 
pass 'err' as long Recently, we reworked a lot of code to consistently pass ESR_ELx as a 64-bit quantity. However, we missed that this can be passed into die() and __die() as the 'err' parameter where it is truncated to a 32-bit int. As notify_die() already takes 'err' as a long, this patch changes die() and __die() to also take 'err' as a long, ensuring that the full value of ESR_ELx is retained. At the same time, die() is updated to consistently log 'err' as a zero-padded 64-bit quantity. Subsequent patches will pass the ESR_ELx value to die() for a number of exceptions. Signed-off-by: Mark Rutland Reviewed-by: Mark Brown Reviewed-by: Anshuman Khandual Cc: Alexandru Elisei Cc: Amit Daniel Kachhap Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20220913101732.3925290-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/system_misc.h | 2 +- arch/arm64/kernel/traps.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 0eb7709422e2..c34344256762 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -18,7 +18,7 @@ struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err); +void die(const char *msg, struct pt_regs *regs, long err); struct siginfo; void arm64_notify_die(const char *str, struct pt_regs *regs, diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index eac4f7a83175..da8ffef6f7c5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -180,12 +180,12 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) #define S_SMP " SMP" -static int __die(const char *str, int err, struct pt_regs *regs) +static int __die(const char *str, long err, struct pt_regs *regs) { static int die_counter; int ret; - pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", + pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); /* trap and error numbers are mostly meaningless on ARM */ @@ -206,7 +206,7 @@ static DEFINE_RAW_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. */ -void die(const char *str, struct pt_regs *regs, int err) +void die(const char *str, struct pt_regs *regs, long err) { int ret; unsigned long flags; -- cgit v1.2.3 From 0f2cb928a1547ae8f89e80a4b8df2c6c02ae5f96 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Sep 2022 11:17:30 +0100 Subject: arm64: consistently pass ESR_ELx to die() Currently, bug_handler() and kasan_handler() call die() with '0' as the 'err' value, whereas die_kernel_fault() passes the ESR_ELx value. For consistency, this patch ensures we always pass the ESR_ELx value to die(). As this is only called for exceptions taken from kernel mode, there should be no user-visible change as a result of this patch. For UNDEFINED exceptions, I've had to modify do_undefinstr() and its callers to pass the ESR_ELx value. In all cases the ESR_ELx value had already been read and was available.
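For illustration, the convention this establishes looks roughly as follows; example_brk_handler() is hypothetical and not part of the patch:

/* Kernel-mode error paths hand the full syndrome to die() rather than 0: */
static int example_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	if (!user_mode(regs))
		die("Oops - example", regs, esr);	/* previously: die(..., 0) */
	return 0;
}

Together with the zero-padded format adopted by the preceding patch, the full ESR_ELx value then shows up in the "Internal error" line of the report.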
Signed-off-by: Mark Rutland Cc: Mark Brown Cc: Alexandru Elisei Cc: Amit Daniel Kachhap Cc: James Morse Cc: Will Deacon Reviewed-by: Anshuman Khandual Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20220913101732.3925290-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/exception.h | 2 +- arch/arm64/kernel/entry-common.c | 14 +++++++------- arch/arm64/kernel/traps.c | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index d94aecff9690..28c0275666e1 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -58,7 +58,7 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); -void do_undefinstr(struct pt_regs *regs); +void do_undefinstr(struct pt_regs *regs, unsigned long esr); void do_bti(struct pt_regs *regs); void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index c75ca36b4a49..3dd17ef6d6d8 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -379,11 +379,11 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr) exit_to_kernel_mode(regs); } -static void noinstr el1_undef(struct pt_regs *regs) +static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr) { enter_from_kernel_mode(regs); local_daif_inherit(regs); - do_undefinstr(regs); + do_undefinstr(regs, esr); local_daif_mask(); exit_to_kernel_mode(regs); } @@ -425,7 +425,7 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) break; case ESR_ELx_EC_SYS64: case ESR_ELx_EC_UNKNOWN: - el1_undef(regs); + el1_undef(regs, esr); break; case ESR_ELx_EC_BREAKPT_CUR: case ESR_ELx_EC_SOFTSTP_CUR: @@ -582,11 +582,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr) exit_to_user_mode(regs); } -static void noinstr el0_undef(struct pt_regs *regs) +static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr) { enter_from_user_mode(regs); local_daif_restore(DAIF_PROCCTX); - do_undefinstr(regs); + do_undefinstr(regs, esr); exit_to_user_mode(regs); } @@ -670,7 +670,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) el0_pc(regs, esr); break; case ESR_ELx_EC_UNKNOWN: - el0_undef(regs); + el0_undef(regs, esr); break; case ESR_ELx_EC_BTI: el0_bti(regs); @@ -788,7 +788,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_CP14_MR: case ESR_ELx_EC_CP14_LS: case ESR_ELx_EC_CP14_64: - el0_undef(regs); + el0_undef(regs, esr); break; case ESR_ELx_EC_CP15_32: case ESR_ELx_EC_CP15_64: diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index da8ffef6f7c5..05999cde870a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -485,7 +485,7 @@ void arm64_notify_segfault(unsigned long addr) force_signal_inject(SIGSEGV, code, addr, 0); } -void do_undefinstr(struct pt_regs *regs) +void do_undefinstr(struct pt_regs *regs, unsigned long esr) { /* check for AArch32 breakpoint instructions */ if (!aarch32_break_handler(regs)) @@ -495,7 +495,7 @@ void do_undefinstr(struct pt_regs *regs) return; if (!user_mode(regs)) - die("Oops - Undefined instruction", regs, 0); + die("Oops - Undefined instruction", regs, esr); 
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } @@ -760,7 +760,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs) hook_base = cp15_64_hooks; break; default: - do_undefinstr(regs); + do_undefinstr(regs, esr); return; } @@ -775,7 +775,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs) * EL0. Fall back to our usual undefined instruction handler * so that we handle these consistently. */ - do_undefinstr(regs); + do_undefinstr(regs, esr); } NOKPROBE_SYMBOL(do_cp15instr); #endif @@ -795,7 +795,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs) * back to our usual undefined instruction handler so that we handle * these consistently. */ - do_undefinstr(regs); + do_undefinstr(regs, esr); } NOKPROBE_SYMBOL(do_sysinstr); @@ -972,7 +972,7 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr) { switch (report_bug(regs->pc, regs)) { case BUG_TRAP_TYPE_BUG: - die("Oops - BUG", regs, 0); + die("Oops - BUG", regs, esr); break; case BUG_TRAP_TYPE_WARN: @@ -1040,7 +1040,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr) * This is something that might be fixed at some point in the future. */ if (!recover) - die("Oops - KASAN", regs, 0); + die("Oops - KASAN", regs, esr); /* If thread survives, skip over the brk instruction and continue: */ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); -- cgit v1.2.3 From a1fafa3b24a70461bbf3e5c0770893feb0a49292 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Sep 2022 11:17:31 +0100 Subject: arm64: rework FPAC exception handling If an FPAC exception is taken from EL1, the entry code will call do_ptrauth_fault(), where due to: BUG_ON(!user_mode(regs)) ... the kernel will report a problem within do_ptrauth_fault() rather than reporting the original context the FPAC exception was taken from. The pt_regs and ESR value reported will be from within do_ptrauth_fault() and the code dump will be for the BRK in BUG_ON(), which isn't sufficient to debug the cause of the original exception. This patch makes the reporting better by having separate EL0 and EL1 FPAC exception handlers, with the latter calling die() directly to report the original context the FPAC exception was taken from. Note that we only need to prevent kprobes of the EL1 FPAC handler, since the EL0 FPAC handler cannot be called recursively. For consistency with do_el0_svc*(), I've named the split functions do_el{0,1}_fpac() rather than do_el{0,1}_ptrauth_fault(). I've also clarified the comment to not imply there are causes other than FPAC exceptions. Prior to this patch FPAC exceptions are reported as: | kernel BUG at arch/arm64/kernel/traps.c:517!
| Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00130-g9c8a180a1cdf-dirty #12 | Hardware name: FVP Base RevC (DT) | pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : do_ptrauth_fault+0x3c/0x40 | lr : el1_fpac+0x34/0x54 | sp : ffff80000a3bbc80 | x29: ffff80000a3bbc80 x28: ffff0008001d8000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: 0000000020400009 x22: ffff800008f70fa4 x21: ffff80000a3bbe00 | x20: 0000000072000000 x19: ffff80000a3bbcb0 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783 | x5 : ffff80000a3bbcb0 x4 : ffff0008001d8000 x3 : 0000000072000000 | x2 : 0000000000000000 x1 : 0000000020400009 x0 : ffff80000a3bbcb0 | Call trace: | do_ptrauth_fault+0x3c/0x40 | el1h_64_sync_handler+0xc4/0xd0 | el1h_64_sync+0x64/0x68 | test_pac+0x8/0x10 | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: 97fffe5e a8c17bfd d50323bf d65f03c0 (d4210000) With this patch applied FPAC exceptions are reported as: | Internal error: Oops - FPAC: 0000000072000000 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g78846e1c4757-dirty #11 | Hardware name: FVP Base RevC (DT) | pstate: 20400009 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : test_pac+0x8/0x10 | lr : 0x0 | sp : ffff80000a3bbe00 | x29: ffff80000a3bbe00 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2c8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff80000a007000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783 | x5 : ffff80000a2c6000 x4 : ffff0008001d8000 x3 : ffff800009f88378 | x2 : 0000000000000000 x1 : 0000000080210000 x0 : ffff000001a90000 | Call trace: | test_pac+0x8/0x10 | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: d50323bf d65f03c0 d503233f aa1f03fe (d50323bf) Signed-off-by: Mark Rutland Reviewed-by: Mark Brown Reviewed-by: Anshuman Khandual Cc: Alexandru Elisei Cc: Amit Daniel Kachhap Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20220913101732.3925290-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/exception.h | 3 ++- arch/arm64/kernel/entry-common.c | 4 ++-- arch/arm64/kernel/traps.c | 16 ++++++++++------ 3 files changed, 14 insertions(+), 9 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 28c0275666e1..3f5141dc3341 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -72,7 +72,8 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); void do_cp15instr(unsigned long esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); -void do_ptrauth_fault(struct pt_regs *regs, 
unsigned long esr); +void do_el0_fpac(struct pt_regs *regs, unsigned long esr); +void do_el1_fpac(struct pt_regs *regs, unsigned long esr); void do_serror(struct pt_regs *regs, unsigned long esr); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3dd17ef6d6d8..f334951d38e3 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -402,7 +402,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) { enter_from_kernel_mode(regs); local_daif_inherit(regs); - do_ptrauth_fault(regs, esr); + do_el1_fpac(regs, esr); local_daif_mask(); exit_to_kernel_mode(regs); } @@ -629,7 +629,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) { enter_from_user_mode(regs); local_daif_restore(DAIF_PROCCTX); - do_ptrauth_fault(regs, esr); + do_el0_fpac(regs, esr); exit_to_user_mode(regs); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 05999cde870a..6cec65fbf8b8 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -508,16 +508,20 @@ void do_bti(struct pt_regs *regs) } NOKPROBE_SYMBOL(do_bti); -void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr) +void do_el0_fpac(struct pt_regs *regs, unsigned long esr) +{ + force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr); +} + +void do_el1_fpac(struct pt_regs *regs, unsigned long esr) { /* - * Unexpected FPAC exception or pointer authentication failure in - * the kernel: kill the task before it does any more harm. + * Unexpected FPAC exception in the kernel: kill the task before it + * does any more harm. */ - BUG_ON(!user_mode(regs)); - force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr); + die("Oops - FPAC", regs, esr); } -NOKPROBE_SYMBOL(do_ptrauth_fault); +NOKPROBE_SYMBOL(do_el1_fpac) #define __user_cache_maint(insn, address, res) \ if (address >= TASK_SIZE_MAX) { \ -- cgit v1.2.3 From 830a2a4d853f2c4a1e4606aa03341b7f273b0e9b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Sep 2022 11:17:32 +0100 Subject: arm64: rework BTI exception handling If a BTI exception is taken from EL1, the entry code will treat this as an unhandled exception and will panic() the kernel. This is inconsistent with the way we handle FPAC exceptions, which have a dedicated handler and only necessarily kill the thread from which the exception was taken, and we don't log all the information that could be relevant to debug the issue. The code in do_bti() has: BUG_ON(!user_mode(regs)); ... and it seems like the intent was to call this for EL1 BTI exceptions, as with FPAC, but this was omitted due to an oversight. This patch adds separate EL0 and EL1 BTI exception handlers, with the latter calling die() directly to report the original context the BTI exception was taken from. This matches our handling of FPAC exceptions.
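The resulting split mirrors the FPAC handlers; simplified from the diff below:

/* EL0: kill the offending user thread with SIGILL. */
void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

/* EL1: report the original kernel context, then die. */
void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	die("Oops - BTI", regs, esr);
}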
Prior to this patch, a BTI failure is reported as: | Unhandled 64-bit el1h sync exception on CPU0, ESR 0x0000000034000002 -- BTI | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9 | Hardware name: linux,dummy-virt (DT) | pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c) | pc : test_bti_callee+0x4/0x10 | lr : test_bti_caller+0x1c/0x28 | sp : ffff80000800bdf0 | x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83 | x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378 | x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff8000080257e4 | Kernel panic - not syncing: Unhandled exception | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9 | Hardware name: linux,dummy-virt (DT) | Call trace: | dump_backtrace.part.0+0xcc/0xe0 | show_stack+0x18/0x5c | dump_stack_lvl+0x64/0x80 | dump_stack+0x18/0x34 | panic+0x170/0x360 | arm64_exit_nmi.isra.0+0x0/0x80 | el1h_64_sync_handler+0x64/0xd0 | el1h_64_sync+0x64/0x68 | test_bti_callee+0x4/0x10 | smp_cpus_done+0xb0/0xbc | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 With this patch applied, a BTI failure is reported as: | Internal error: Oops - BTI: 0000000034000002 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g0ad98265d582-dirty #8 | Hardware name: linux,dummy-virt (DT) | pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c) | pc : test_bti_callee+0x4/0x10 | lr : test_bti_caller+0x1c/0x28 | sp : ffff80000800bdf0 | x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83 | x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378 | x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff800008025804 | Call trace: | test_bti_callee+0x4/0x10 | smp_cpus_done+0xb0/0xbc | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: d50323bf d53cd040 d65f03c0 d503233f (d50323bf) Signed-off-by: Mark Rutland Reviewed-by: Mark Brown Reviewed-by: Anshuman Khandual Cc: Alexandru Elisei Cc: Amit Daniel Kachhap Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20220913101732.3925290-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/exception.h | 3 ++- arch/arm64/kernel/entry-common.c | 14 +++++++++++++- arch/arm64/kernel/traps.c | 10 +++++++--- 3 files changed, 22 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h 
index 3f5141dc3341..0278a58abe69 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -59,7 +59,8 @@ asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs, unsigned long esr); -void do_bti(struct pt_regs *regs); +void do_el0_bti(struct pt_regs *regs); +void do_el1_bti(struct pt_regs *regs, unsigned long esr); void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs); void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index f334951d38e3..9173fad279af 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -388,6 +388,15 @@ static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr) exit_to_kernel_mode(regs); } +static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr) +{ + enter_from_kernel_mode(regs); + local_daif_inherit(regs); + do_el1_bti(regs, esr); + local_daif_mask(); + exit_to_kernel_mode(regs); +} + static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); @@ -427,6 +436,9 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_UNKNOWN: el1_undef(regs, esr); break; + case ESR_ELx_EC_BTI: + el1_bti(regs, esr); + break; case ESR_ELx_EC_BREAKPT_CUR: case ESR_ELx_EC_SOFTSTP_CUR: case ESR_ELx_EC_WATCHPT_CUR: @@ -594,7 +606,7 @@ static void noinstr el0_bti(struct pt_regs *regs) { enter_from_user_mode(regs); local_daif_restore(DAIF_PROCCTX); - do_bti(regs); + do_el0_bti(regs); exit_to_user_mode(regs); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 6cec65fbf8b8..54b5ba135b97 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -501,12 +501,16 @@ void do_undefinstr(struct pt_regs *regs, unsigned long esr) } NOKPROBE_SYMBOL(do_undefinstr); -void do_bti(struct pt_regs *regs) +void do_el0_bti(struct pt_regs *regs) { - BUG_ON(!user_mode(regs)); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } -NOKPROBE_SYMBOL(do_bti); + +void do_el1_bti(struct pt_regs *regs, unsigned long esr) +{ + die("Oops - BTI", regs, esr); +} +NOKPROBE_SYMBOL(do_el1_bti); void do_el0_fpac(struct pt_regs *regs, unsigned long esr) { -- cgit v1.2.3 From c0357a73fa4a96d8ed9ee46e9927d9fcbc9d0828 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:49 +0100 Subject: arm64/sysreg: Align field names in ID_AA64DFR0_EL1 with architecture The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1 does not align well with kernel conventions, using as it does a lot of MixedCase in various arrangements. In preparation for automatically generating the defines for this register rename the defines used to match what is in the architecture. 
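As a concrete example of the rename (taken from the perf_event hunk below), callers move from the old all-caps spelling to the architecture's MixedCase field name:

/* Before: */
pmuver = cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMUVER_SHIFT);

/* After: */
pmuver = cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMUVer_SHIFT);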
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/el2_setup.h | 8 +++---- arch/arm64/include/asm/hw_breakpoint.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 40 +++++++++++++++++----------------- arch/arm64/kernel/cpufeature.c | 14 ++++++------ arch/arm64/kernel/debug-monitors.c | 2 +- arch/arm64/kernel/perf_event.c | 8 +++---- arch/arm64/kvm/debug.c | 4 ++-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 12 +++++----- arch/arm64/kvm/pmu-emul.c | 16 +++++++------- arch/arm64/kvm/sys_regs.c | 16 +++++++------- 12 files changed, 64 insertions(+), 64 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c1fc5f7bb978..71222f2b8a5a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -512,7 +512,7 @@ alternative_endif */ .macro reset_pmuserenr_el0, tmpreg mrs \tmpreg, id_aa64dfr0_el1 - sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4 + sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4 cmp \tmpreg, #1 // Skip if no PMU present b.lt 9000f msr pmuserenr_el0, xzr // Disable PMU access from EL0 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 79bb9e58d9c6..e3b63967c8a9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) u64 mask = GENMASK_ULL(field + 3, field); /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ - if (val == ID_AA64DFR0_PMUVER_IMP_DEF) + if (val == ID_AA64DFR0_PMUVer_IMP_DEF) val = 0; if (val > cap) { diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b6e9bea7c9ec..03af4278bc23 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -40,7 +40,7 @@ .macro __init_el2_debug mrs x1, id_aa64dfr0_el1 - sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 + sbfx x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4 cmp x0, #1 b.lt .Lskip_pmu_\@ // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps @@ -49,7 +49,7 @@ csel x2, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ - ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, @@ -65,7 +65,7 @@ .Lskip_spe_\@: /* Trace buffer */ - ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present mrs_s x0, SYS_TRBIDR_EL1 @@ -137,7 +137,7 @@ mov x0, xzr mrs x1, id_aa64dfr0_el1 - ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 + ubfx x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 cmp x1, #3 b.lt .Lset_debug_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index bc7aaed4b34e..d667c03d5f5e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -142,7 +142,7 @@ static inline int get_num_brps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_BRPS_SHIFT); + ID_AA64DFR0_BRPs_SHIFT); } /* Determine number of WRP registers available. 
*/ @@ -151,7 +151,7 @@ static inline int get_num_wrps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_WRPS_SHIFT); + ID_AA64DFR0_WRPs_SHIFT); } #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c7876363c6e5..b1e9e4d3d964 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -700,26 +700,26 @@ /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TRBE_SHIFT 44 -#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 -#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 -#define ID_AA64DFR0_PMSVER_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 -#define ID_AA64DFR0_WRPS_SHIFT 20 -#define ID_AA64DFR0_BRPS_SHIFT 12 -#define ID_AA64DFR0_PMUVER_SHIFT 8 -#define ID_AA64DFR0_TRACEVER_SHIFT 4 -#define ID_AA64DFR0_DEBUGVER_SHIFT 0 - -#define ID_AA64DFR0_PMUVER_8_0 0x1 -#define ID_AA64DFR0_PMUVER_8_1 0x4 -#define ID_AA64DFR0_PMUVER_8_4 0x5 -#define ID_AA64DFR0_PMUVER_8_5 0x6 -#define ID_AA64DFR0_PMUVER_8_7 0x7 -#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVER_8_2 0x1 -#define ID_AA64DFR0_PMSVER_8_3 0x2 +#define ID_AA64DFR0_TraceBuffer_SHIFT 44 +#define ID_AA64DFR0_TraceFilt_SHIFT 40 +#define ID_AA64DFR0_DoubleLock_SHIFT 36 +#define ID_AA64DFR0_PMSVer_SHIFT 32 +#define ID_AA64DFR0_CTX_CMPs_SHIFT 28 +#define ID_AA64DFR0_WRPs_SHIFT 20 +#define ID_AA64DFR0_BRPs_SHIFT 12 +#define ID_AA64DFR0_PMUVer_SHIFT 8 +#define ID_AA64DFR0_TraceVer_SHIFT 4 +#define ID_AA64DFR0_DebugVer_SHIFT 0 + +#define ID_AA64DFR0_PMUVer_8_0 0x1 +#define ID_AA64DFR0_PMUVer_8_1 0x4 +#define ID_AA64DFR0_PMUVer_8_4 0x5 +#define ID_AA64DFR0_PMUVer_8_5 0x6 +#define ID_AA64DFR0_PMUVer_8_7 0x7 +#define ID_AA64DFR0_PMUVer_IMP_DEF 0xf + +#define ID_AA64DFR0_PMSVer_8_2 0x1 +#define ID_AA64DFR0_PMSVer_8_3 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c2e42feb3e1a..c694d6cbf82d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0), /* * We can instantiate multiple PMU instances with different levels * of support. 
*/ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index bf9fe71589bc..572c8ac2873c 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -28,7 +28,7 @@ u8 debug_monitors_arch(void) { return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1), - ID_AA64DFR0_DEBUGVER_SHIFT); + ID_AA64DFR0_DebugVer_SHIFT); } /* diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index cb69ff1e6138..83f7a8980623 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info) dfr0 = read_sysreg(id_aa64dfr0_el1); pmuver = cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_PMUVER_SHIFT); - if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0) + ID_AA64DFR0_PMUVer_SHIFT); + if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0) return; cpu_pmu->pmuver = pmuver; @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 0b28d7db7c76..14878cd55c53 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * If SPE is present on this CPU and is available at current EL, * we may need to check if the host state needs to be saved. 
*/ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) && !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) && !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); } diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index fc3e32709ba2..ba1327fac03f 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Trap/constrain PMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK; } /* Trap Debug */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids)) mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; /* Trap OS Double Lock */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids)) mdcr_set |= MDCR_EL2_TDOSA; /* Trap SPE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPMS; mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; } /* Trap Trace Filter */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids)) cptr_set |= CPTR_EL2_TTA; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 11c43bed5f97..331478c67dca 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_PMUVER_8_0: + case ID_AA64DFR0_PMUVer_8_0: return GENMASK(9, 0); - case ID_AA64DFR0_PMUVER_8_1: - case ID_AA64DFR0_PMUVER_8_4: - case ID_AA64DFR0_PMUVER_8_5: - case ID_AA64DFR0_PMUVER_8_7: + case ID_AA64DFR0_PMUVer_8_1: + case ID_AA64DFR0_PMUVer_8_4: + case ID_AA64DFR0_PMUVer_8_5: + case ID_AA64DFR0_PMUVer_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) + if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) return; mutex_lock(&arm_pmus_lock); @@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) if (event->pmu) { pmu = to_arm_pmu(event->pmu); if (pmu->pmuver == 0 || - pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) + pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) pmu = NULL; } @@ -856,7 +856,7 @@ u64 
kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index fa61793467a7..626d7769e23d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64DFR0_EL1: /* Limit debug to ARMv8.0 */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6); /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVER_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0); + ID_AA64DFR0_PMUVer_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0); /* Hide SPE from guests */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer); break; case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.4 */ @@ -1827,9 +1827,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); - p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) + p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } -- cgit v1.2.3 From fcf37b38ff2282ef3dc6ba1966c83b29e5734edd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:50 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64DFR0_EL1 to follow the convention. No functional changes. 
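Concretely (mirroring the hw_breakpoint hunk below), users of the field defines pick up the register's _EL1 suffix:

/* Before: */
return 1 + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_BRPs_SHIFT);

/* After: */
return 1 + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRPs_SHIFT);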
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/el2_setup.h | 8 +++---- arch/arm64/include/asm/hw_breakpoint.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 42 +++++++++++++++++----------------- arch/arm64/kernel/cpufeature.c | 14 ++++++------ arch/arm64/kernel/debug-monitors.c | 2 +- arch/arm64/kernel/perf_event.c | 8 +++---- arch/arm64/kvm/debug.c | 4 ++-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 12 +++++----- arch/arm64/kvm/pmu-emul.c | 16 ++++++------- arch/arm64/kvm/sys_regs.c | 16 ++++++------- 12 files changed, 65 insertions(+), 65 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 71222f2b8a5a..cf8e72e733de 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -512,7 +512,7 @@ alternative_endif */ .macro reset_pmuserenr_el0, tmpreg mrs \tmpreg, id_aa64dfr0_el1 - sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4 + sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4 cmp \tmpreg, #1 // Skip if no PMU present b.lt 9000f msr pmuserenr_el0, xzr // Disable PMU access from EL0 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e3b63967c8a9..ff06e6fb5939 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) u64 mask = GENMASK_ULL(field + 3, field); /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ - if (val == ID_AA64DFR0_PMUVer_IMP_DEF) + if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) val = 0; if (val > cap) { diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 03af4278bc23..668569adf4d3 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -40,7 +40,7 @@ .macro __init_el2_debug mrs x1, id_aa64dfr0_el1 - sbfx x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4 + sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4 cmp x0, #1 b.lt .Lskip_pmu_\@ // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps @@ -49,7 +49,7 @@ csel x2, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ - ubfx x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, @@ -65,7 +65,7 @@ .Lskip_spe_\@: /* Trace buffer */ - ubfx x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present mrs_s x0, SYS_TRBIDR_EL1 @@ -137,7 +137,7 @@ mov x0, xzr mrs x1, id_aa64dfr0_el1 - ubfx x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 + ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cmp x1, #3 b.lt .Lset_debug_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index d667c03d5f5e..fa4c6ff3aa9b 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -142,7 +142,7 @@ static inline int get_num_brps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_BRPs_SHIFT); + ID_AA64DFR0_EL1_BRPs_SHIFT); } /* Determine number of WRP registers available. 
*/ @@ -151,7 +151,7 @@ static inline int get_num_wrps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_WRPs_SHIFT); + ID_AA64DFR0_EL1_WRPs_SHIFT); } #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b1e9e4d3d964..a9544561397d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -699,27 +699,27 @@ #endif /* id_aa64dfr0 */ -#define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TraceBuffer_SHIFT 44 -#define ID_AA64DFR0_TraceFilt_SHIFT 40 -#define ID_AA64DFR0_DoubleLock_SHIFT 36 -#define ID_AA64DFR0_PMSVer_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPs_SHIFT 28 -#define ID_AA64DFR0_WRPs_SHIFT 20 -#define ID_AA64DFR0_BRPs_SHIFT 12 -#define ID_AA64DFR0_PMUVer_SHIFT 8 -#define ID_AA64DFR0_TraceVer_SHIFT 4 -#define ID_AA64DFR0_DebugVer_SHIFT 0 - -#define ID_AA64DFR0_PMUVer_8_0 0x1 -#define ID_AA64DFR0_PMUVer_8_1 0x4 -#define ID_AA64DFR0_PMUVer_8_4 0x5 -#define ID_AA64DFR0_PMUVer_8_5 0x6 -#define ID_AA64DFR0_PMUVer_8_7 0x7 -#define ID_AA64DFR0_PMUVer_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVer_8_2 0x1 -#define ID_AA64DFR0_PMSVer_8_3 0x2 +#define ID_AA64DFR0_EL1_MTPMU_SHIFT 48 +#define ID_AA64DFR0_EL1_TraceBuffer_SHIFT 44 +#define ID_AA64DFR0_EL1_TraceFilt_SHIFT 40 +#define ID_AA64DFR0_EL1_DoubleLock_SHIFT 36 +#define ID_AA64DFR0_EL1_PMSVer_SHIFT 32 +#define ID_AA64DFR0_EL1_CTX_CMPs_SHIFT 28 +#define ID_AA64DFR0_EL1_WRPs_SHIFT 20 +#define ID_AA64DFR0_EL1_BRPs_SHIFT 12 +#define ID_AA64DFR0_EL1_PMUVer_SHIFT 8 +#define ID_AA64DFR0_EL1_TraceVer_SHIFT 4 +#define ID_AA64DFR0_EL1_DebugVer_SHIFT 0 + +#define ID_AA64DFR0_EL1_PMUVer_8_0 0x1 +#define ID_AA64DFR0_EL1_PMUVer_8_1 0x4 +#define ID_AA64DFR0_EL1_PMUVer_8_4 0x5 +#define ID_AA64DFR0_EL1_PMUVer_8_5 0x6 +#define ID_AA64DFR0_EL1_PMUVer_8_7 0x7 +#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf + +#define ID_AA64DFR0_EL1_PMSVer_8_2 0x1 +#define ID_AA64DFR0_EL1_PMSVer_8_3 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c694d6cbf82d..c22732a6908b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0), /* * We can instantiate multiple PMU instances with different levels * of support. 
*/ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 572c8ac2873c..3da09778267e 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -28,7 +28,7 @@ u8 debug_monitors_arch(void) { return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1), - ID_AA64DFR0_DebugVer_SHIFT); + ID_AA64DFR0_EL1_DebugVer_SHIFT); } /* diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 83f7a8980623..8b878837d8f1 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info) dfr0 = read_sysreg(id_aa64dfr0_el1); pmuver = cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_PMUVer_SHIFT); - if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0) + ID_AA64DFR0_EL1_PMUVer_SHIFT); + if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0) return; cpu_pmu->pmuver = pmuver; @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 14878cd55c53..3f7563d768e2 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * If SPE is present on this CPU and is available at current EL, * we may need to check if the host state needs to be saved. 
*/ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); } diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index ba1327fac03f..85d3b7ae720f 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Trap/constrain PMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK; } /* Trap Debug */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids)) mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; /* Trap OS Double Lock */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids)) mdcr_set |= MDCR_EL2_TDOSA; /* Trap SPE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPMS; mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; } /* Trap Trace Filter */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) cptr_set |= CPTR_EL2_TTA; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 331478c67dca..7122b5387de6 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_PMUVer_8_0: + case ID_AA64DFR0_EL1_PMUVer_8_0: return GENMASK(9, 0); - case ID_AA64DFR0_PMUVer_8_1: - case ID_AA64DFR0_PMUVer_8_4: - case ID_AA64DFR0_PMUVer_8_5: - case ID_AA64DFR0_PMUVer_8_7: + case ID_AA64DFR0_EL1_PMUVer_8_1: + case ID_AA64DFR0_EL1_PMUVer_8_4: + case ID_AA64DFR0_EL1_PMUVer_8_5: + case ID_AA64DFR0_EL1_PMUVer_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) + if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) return; mutex_lock(&arm_pmus_lock); @@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) if (event->pmu) { pmu = to_arm_pmu(event->pmu); if (pmu->pmuver == 0 || - pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) + pmu->pmuver == 
ID_AA64DFR0_EL1_PMUVer_IMP_DEF) pmu = NULL; } @@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 626d7769e23d..d4546a5b0ea5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64DFR0_EL1: /* Limit debug to ARMv8.0 */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6); /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVer_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0); + ID_AA64DFR0_EL1_PMUVer_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0); /* Hide SPE from guests */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer); break; case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.4 */ @@ -1827,9 +1827,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); - p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20) + p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } -- cgit v1.2.3 From 121a8fc088f13c64d9f3c9b3e7faa4c246e0a32c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:51 +0100 Subject: arm64/sysreg: Use feature numbering for PMU and SPE revisions Currently the kernel refers to the versions of the PMU and SPE features by the version of the architecture where those features were updated, but the Arm ARM refers to them using the FEAT_ names for the features. To improve consistency, to help with updating for newer features, and because v9 will make the current naming scheme more confusing, update the macros identifying features to use the FEAT_-based scheme.
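[Editor's note: for reference, a sketch of the resulting mapping. The names and values are taken from the diff below; the FEAT_ names in the comments are the architectural features they correspond to:

	#define ID_AA64DFR0_EL1_PMUVer_IMP	0x1	/* FEAT_PMUv3, was _8_0 */
	#define ID_AA64DFR0_EL1_PMUVer_V3P1	0x4	/* FEAT_PMUv3p1, was _8_1 */
	#define ID_AA64DFR0_EL1_PMUVer_V3P4	0x5	/* FEAT_PMUv3p4, was _8_4 */
	#define ID_AA64DFR0_EL1_PMUVer_V3P5	0x6	/* FEAT_PMUv3p5, was _8_5 */
	#define ID_AA64DFR0_EL1_PMUVer_V3P7	0x7	/* FEAT_PMUv3p7, was _8_7 */
	#define ID_AA64DFR0_EL1_PMSVer_IMP	0x1	/* FEAT_SPE, was _8_2 */
	#define ID_AA64DFR0_EL1_PMSVer_V1P1	0x2	/* FEAT_SPEv1p1, was _8_3 */
]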
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 14 +++++++------- arch/arm64/kernel/perf_event.c | 4 ++-- arch/arm64/kvm/pmu-emul.c | 12 ++++++------ arch/arm64/kvm/sys_regs.c | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a9544561397d..aea3ec657c3f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -711,15 +711,15 @@ #define ID_AA64DFR0_EL1_TraceVer_SHIFT 4 #define ID_AA64DFR0_EL1_DebugVer_SHIFT 0 -#define ID_AA64DFR0_EL1_PMUVer_8_0 0x1 -#define ID_AA64DFR0_EL1_PMUVer_8_1 0x4 -#define ID_AA64DFR0_EL1_PMUVer_8_4 0x5 -#define ID_AA64DFR0_EL1_PMUVer_8_5 0x6 -#define ID_AA64DFR0_EL1_PMUVer_8_7 0x7 +#define ID_AA64DFR0_EL1_PMUVer_IMP 0x1 +#define ID_AA64DFR0_EL1_PMUVer_V3P1 0x4 +#define ID_AA64DFR0_EL1_PMUVer_V3P4 0x5 +#define ID_AA64DFR0_EL1_PMUVer_V3P5 0x6 +#define ID_AA64DFR0_EL1_PMUVer_V3P7 0x7 #define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf -#define ID_AA64DFR0_EL1_PMSVer_8_2 0x1 -#define ID_AA64DFR0_EL1_PMSVer_8_3 0x2 +#define ID_AA64DFR0_EL1_PMSVer_IMP 0x1 +#define ID_AA64DFR0_EL1_PMSVer_V1P1 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 8b878837d8f1..7b0643fe2f13 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 7122b5387de6..0003c7d37533 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_EL1_PMUVer_8_0: + case ID_AA64DFR0_EL1_PMUVer_IMP: return GENMASK(9, 0); - case ID_AA64DFR0_EL1_PMUVer_8_1: - case ID_AA64DFR0_EL1_PMUVer_8_4: - case ID_AA64DFR0_EL1_PMUVer_8_5: - case ID_AA64DFR0_EL1_PMUVer_8_7: + case ID_AA64DFR0_EL1_PMUVer_V3P1: + case ID_AA64DFR0_EL1_PMUVer_V3P4: + case ID_AA64DFR0_EL1_PMUVer_V3P5: + case ID_AA64DFR0_EL1_PMUVer_V3P7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d4546a5b0ea5..2ef1121ab844 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1115,7 
+1115,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_EL1_PMUVer_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0); + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0); /* Hide SPE from guests */ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer); break; -- cgit v1.2.3 From e62a2d2610f0e6cf803027e3803c822140a2a407 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:52 +0100 Subject: arm64/sysreg: Convert ID_AA64DFR0_EL1 to automatic generation Convert ID_AA64DFR0_EL1 to automatic generation as per DDI0487I.a, no functional changes. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 24 ---------------- arch/arm64/tools/sysreg | 63 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 24 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index aea3ec657c3f..943def0d28f2 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -190,7 +190,6 @@ #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) -#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) @@ -698,29 +697,6 @@ #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif -/* id_aa64dfr0 */ -#define ID_AA64DFR0_EL1_MTPMU_SHIFT 48 -#define ID_AA64DFR0_EL1_TraceBuffer_SHIFT 44 -#define ID_AA64DFR0_EL1_TraceFilt_SHIFT 40 -#define ID_AA64DFR0_EL1_DoubleLock_SHIFT 36 -#define ID_AA64DFR0_EL1_PMSVer_SHIFT 32 -#define ID_AA64DFR0_EL1_CTX_CMPs_SHIFT 28 -#define ID_AA64DFR0_EL1_WRPs_SHIFT 20 -#define ID_AA64DFR0_EL1_BRPs_SHIFT 12 -#define ID_AA64DFR0_EL1_PMUVer_SHIFT 8 -#define ID_AA64DFR0_EL1_TraceVer_SHIFT 4 -#define ID_AA64DFR0_EL1_DebugVer_SHIFT 0 - -#define ID_AA64DFR0_EL1_PMUVer_IMP 0x1 -#define ID_AA64DFR0_EL1_PMUVer_V3P1 0x4 -#define ID_AA64DFR0_EL1_PMUVer_V3P4 0x5 -#define ID_AA64DFR0_EL1_PMUVer_V3P5 0x6 -#define ID_AA64DFR0_EL1_PMUVer_V3P7 0x7 -#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf - -#define ID_AA64DFR0_EL1_PMSVer_IMP 0x1 -#define ID_AA64DFR0_EL1_PMSVer_V1P1 0x2 - #define ID_DFR0_PERFMON_SHIFT 24 #define ID_DFR0_PERFMON_8_0 0x3 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 5bccf7712ec3..e552805c7501 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -252,6 +252,69 @@ EndEnum Res0 31:0 EndSysreg +Sysreg ID_AA64DFR0_EL1 3 0 0 5 0 +Enum 63:60 HPMN0 + 0b0000 UNPREDICTABLE + 0b0001 DEF +EndEnum +Res0 59:56 +Enum 55:52 BRBE + 0b0000 NI + 0b0001 IMP + 0b0010 BRBE_V1P1 +EndEnum +Enum 51:48 MTPMU + 0b0000 NI_IMPDEF + 0b0001 IMP + 0b1111 NI +EndEnum +Enum 47:44 TraceBuffer + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 43:40 TraceFilt + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 39:36 DoubleLock + 0b0000 IMP + 0b1111 NI +EndEnum +Enum 35:32 PMSVer + 0b0000 NI + 0b0001 IMP + 0b0010 V1P1 + 0b0011 V1P2 + 0b0100 V1P3 +EndEnum +Field 31:28 CTX_CMPs +Res0 27:24 +Field 23:20 WRPs +Res0 19:16 +Field 15:12 BRPs +Enum 11:8 PMUVer + 0b0000 NI + 0b0001 IMP + 0b0100 V3P1 + 0b0101 V3P4 + 0b0110 V3P5 + 0b0111 V3P7 + 0b1000 V3P8 + 0b1111 IMP_DEF +EndEnum +Enum 7:4 TraceVer + 0b0000 NI + 0b0001 IMP +EndEnum +Enum 3:0 DebugVer + 0b0110 IMP + 0b0111 VHE + 0b1000 V8P2 + 0b1001 V8P4 + 0b1010 V8P8 +EndEnum
+EndSysreg + Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 Enum 63:60 RNDR 0b0000 NI -- cgit v1.2.3 From c65c617806ed490cf0ed09a151c627e41ce6e0c6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:53 +0100 Subject: arm64/sysreg: Convert ID_AA64DFR1_EL1 to automatic generation Convert ID_AA64DFR1_EL1 to automatic generation as per DDI0487I.a, no functional changes. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 943def0d28f2..9a7d84d39b0b 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -190,8 +190,6 @@ #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) -#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) - #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index e552805c7501..076766b6faf0 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -315,6 +315,10 @@ Enum 3:0 DebugVer EndEnum EndSysreg +Sysreg ID_AA64DFR1_EL1 3 0 0 5 1 +Res0 63:0 +EndSysreg + Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 Enum 63:60 RNDR 0b0000 NI -- cgit v1.2.3 From 10453bf149c9539c446574932f00ea50438cede5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:54 +0100 Subject: arm64/sysreg: Convert ID_AA64AFRn_EL1 to automatic generation Convert ID_AA64AFRn_EL1 to automatic generation as per DDI0487I.a, no functional changes. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 3 --- arch/arm64/tools/sysreg | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 9a7d84d39b0b..debc1c0b2b7f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -190,9 +190,6 @@ #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) -#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) -#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) - #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 076766b6faf0..7f1fb36f208c 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -319,6 +319,22 @@ Sysreg ID_AA64DFR1_EL1 3 0 0 5 1 Res0 63:0 EndSysreg +Sysreg ID_AA64AFR0_EL1 3 0 0 5 4 +Res0 63:32 +Field 31:28 IMPDEF7 +Field 27:24 IMPDEF6 +Field 23:20 IMPDEF5 +Field 19:16 IMPDEF4 +Field 15:12 IMPDEF3 +Field 11:8 IMPDEF2 +Field 7:4 IMPDEF1 +Field 3:0 IMPDEF0 +EndSysreg + +Sysreg ID_AA64AFR1_EL1 3 0 0 5 5 +Res0 63:0 +EndSysreg + Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 Enum 63:60 RNDR 0b0000 NI -- cgit v1.2.3 From 237405ebef580a7352a52129b2465c117145eafa Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 9 Sep 2022 17:59:36 +0100 Subject: arm64: cpufeature: Force HWCAP to be based on the sysreg visible to user-space arm64 advertises hardware features to user-space via HWCAPs, and by emulating access to the CPU's id registers.
The cpufeature code has a sanitised system-wide view of an id register, and a sanitised user-space view of an id register, where some features use their 'safe' value instead of the hardware value. It is currently possible for a HWCAP to be advertised where the user-space view of the id register does not show the feature as supported. Erratum workarounds need to remove both the HWCAP and the feature from the user-space view of the id register. This involves duplicating the code, and spreading it over cpufeature.c and cpu_errata.c. Make the HWCAP code use the user-space view of id registers. This ensures the values never diverge, and allows erratum workarounds to remove HWCAPs by modifying the user-space view of the id register. Signed-off-by: James Morse Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20220909165938.3931307-2-james.morse@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af4de817d712..b6a4e97805a4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1401,17 +1401,40 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) return val >= entry->min_field_value; } -static bool -has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +static u64 +read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope) { - u64 val; - WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); if (scope == SCOPE_SYSTEM) - val = read_sanitised_ftr_reg(entry->sys_reg); + return read_sanitised_ftr_reg(entry->sys_reg); else - val = __read_sysreg_by_encoding(entry->sys_reg); + return __read_sysreg_by_encoding(entry->sys_reg); +} + +static bool +has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + int mask; + struct arm64_ftr_reg *regp; + u64 val = read_scoped_sysreg(entry, scope); + + regp = get_arm64_ftr_reg(entry->sys_reg); + if (!regp) + return false; + + mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask, + entry->field_pos, + entry->field_width); + if (!mask) + return false; + + return feature_matches(val, entry); +} +static bool +has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + u64 val = read_scoped_sysreg(entry, scope); return feature_matches(val, entry); } @@ -2624,7 +2647,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }; #define HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \ - .matches = has_cpuid_feature, \ + .matches = has_user_cpuid_feature, \ .sys_reg = reg, \ .field_pos = field, \ .field_width = width, \ -- cgit v1.2.3 From 445c953e4a84b2942bdb5c0b5aff571c903680fe Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 9 Sep 2022 17:59:37 +0100 Subject: arm64: cpufeature: Expose get_arm64_ftr_reg() outside cpufeature.c get_arm64_ftr_reg() returns the properties of a system register based on its instruction encoding. This is needed by erratum workarounds in cpu_errata.c to modify the user-space visible view of id registers.
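[Editor's note: as background, the "user-space view" is what a program observes when it reads one of these ID registers directly; the hardware traps the MRS to the kernel, which emulates it with the sanitised value. A minimal userspace sketch, assuming a toolchain that accepts the register by name (older assemblers may need the S3_0_C0_C6_1 encoding instead):

	#include <stdint.h>

	static inline uint64_t read_id_aa64isar1(void)
	{
		uint64_t val;

		/* Trapped and emulated by the kernel; yields the sanitised
		 * user-space view rather than the raw hardware value. */
		asm volatile("mrs %0, ID_AA64ISAR1_EL1" : "=r" (val));
		return val;
	}
]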
Signed-off-by: James Morse Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20220909165938.3931307-3-james.morse@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 2 ++ arch/arm64/kernel/cpufeature.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index fd7d75a275f6..5c3317bc06c7 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -907,6 +907,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) return 8; } +struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id); + extern struct arm64_ftr_override id_aa64mmfr1_override; extern struct arm64_ftr_override id_aa64pfr0_override; extern struct arm64_ftr_override id_aa64pfr1_override; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b6a4e97805a4..24f43ad02cd3 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -750,7 +750,7 @@ static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id) * returns - Upon success, matching ftr_reg entry for id. * - NULL on failure but with an WARN_ON(). */ -static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) +struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) { struct arm64_ftr_reg *reg; -- cgit v1.2.3 From 1bdb0fbb2e27c51a6f311867726462c983a1d9ee Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 9 Sep 2022 17:59:38 +0100 Subject: arm64: errata: remove BF16 HWCAP due to incorrect result on Cortex-A510 Cortex-A510's erratum #2658417 causes two BF16 instructions to return the wrong result in rare circumstances when a pair of A510 CPUs are using shared neon hardware. The two instructions affected are BFMMLA and VMMLA; support for these is indicated by the BF16 HWCAP. Remove it on affected platforms. Signed-off-by: James Morse Link: https://lore.kernel.org/r/20220909165938.3931307-4-james.morse@arm.com [catalin.marinas@arm.com: add revision to the Kconfig help; remove .type] Signed-off-by: Catalin Marinas --- Documentation/arm64/silicon-errata.rst | 2 ++ arch/arm64/Kconfig | 13 +++++++++++++ arch/arm64/kernel/cpu_errata.c | 26 ++++++++++++++++++++++++++ arch/arm64/tools/cpucaps | 1 + 4 files changed, 42 insertions(+) (limited to 'arch/arm64') diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index fda97b3fcf01..17d9fc5d14fb 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -110,6 +110,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9fb9fff08c94..526ab76cd233 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -733,6 +733,19 @@ config ARM64_ERRATUM_2077057 If unsure, say Y.
+config ARM64_ERRATUM_2658417 + bool "Cortex-A510: 2658417: remove BF16 support due to incorrect result" + default y + help + This option adds the workaround for ARM Cortex-A510 erratum 2658417. + Affected Cortex-A510 (r0p0 to r1p1) may produce the wrong result for + BFMMLA or VMMLA instructions in rare circumstances when a pair of + A510 CPUs are using shared neon hardware. As the sharing is not + discoverable by the kernel, hide the BF16 HWCAP to indicate that + user-space should not be using these instructions. + + If unsure, say Y. + config ARM64_ERRATUM_2119858 bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode" default y diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 53b973b6059f..58ca4f6b25d6 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -121,6 +121,22 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); } +static DEFINE_RAW_SPINLOCK(reg_user_mask_modification); +static void __maybe_unused +cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused) +{ + struct arm64_ftr_reg *regp; + + regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1); + if (!regp) + return; + + raw_spin_lock(®_user_mask_modification); + if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK) + regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK; + raw_spin_unlock(®_user_mask_modification); +} + #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ .matches = is_affected_midr_range, \ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) @@ -691,6 +707,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { CAP_MIDR_RANGE_LIST(broken_aarch32_aes), .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2658417 + { + .desc = "ARM erratum 2658417", + .capability = ARM64_WORKAROUND_2658417, + /* Cortex-A510 r0p0 - r1p1 */ + ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1), + MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)), + .cpu_enable = cpu_clear_bf16_from_user_emulation, + }, #endif { } diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 63b2484ce6c3..f553a7cb1b07 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -68,6 +68,7 @@ WORKAROUND_2038923 WORKAROUND_2064142 WORKAROUND_2077057 WORKAROUND_2457168 +WORKAROUND_2658417 WORKAROUND_TRBE_OVERWRITE_FILL_MODE WORKAROUND_TSB_FLUSH_FAILURE WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -- cgit v1.2.3 From 120072f48f4ef0b017f65d5efd0548938cb43052 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 19 Sep 2022 17:27:53 +0100 Subject: arm64: configs: Enable all PMUs provided by Arm The selection of PMUs enabled in the defconfig is currently a bit random and does not include a number of those provided by Arm and present in a fairly wide range of SoCs. Improve coverage and defconfig utility by enabling all the Arm provided PMUs by default. 
Signed-off-by: Mark Brown Reviewed-by: James Clark Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220919162753.3079869-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/configs/defconfig | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index d5b2d2dd4904..ab8c014eb314 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1229,8 +1229,14 @@ CONFIG_PHY_UNIPHIER_USB3=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_PHY_AM654_SERDES=m CONFIG_PHY_J721E_WIZ=m +CONFIG_ARM_CCI_PMU=m +CONFIG_ARM_CCN=m +CONFIG_ARM_CMN=m CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_DSU_PMU=m CONFIG_FSL_IMX8_DDR_PMU=m +CONFIG_ARM_SPE_PMU=m +CONFIG_ARM_DMC620_PMU=m CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y CONFIG_HISI_PMU=y -- cgit v1.2.3 From 31dbadcc2828ac8c4608bf2ac6b12b286943e634 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 29 Jun 2022 17:35:24 +0800 Subject: arm64: defconfig: Enable memory hotplug and hotremove config Let's enable ACPI_HMAT, ACPI_HOTPLUG_MEMORY, MEMORY_HOTPLUG and MEMORY_HOTREMOVE for more test coverage; they are also useful for heterogeneous memory scenarios.
[1] https://github.com/moby/moby/blob/master/contrib/check-config.sh Cc: Will Deacon Cc: Arnd Bergmann Acked-by: Catalin Marinas Signed-off-by: Vincenzo Frascino Link: https://lore.kernel.org/r/20220907110235.14708-1-vincenzo.frascino@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/configs/defconfig | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index ef8d92d42aa9..ef3467092ded 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -18,6 +18,7 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y @@ -143,12 +144,16 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IPV6=m CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NETFILTER_XT_MARK=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_IP_VS=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -- cgit v1.2.3 From 0027d9c6c7b57b61b82f7275633ba89d0de2d2fd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 29 Aug 2022 16:49:20 +0100 Subject: arm64/ptrace: Support access to TPIDR2_EL0 SME introduces an additional EL0 register, TPIDR2_EL0, intended for use by userspace as part of SME. Provide ptrace access to it through the existing NT_ARM_TLS regset used for TPIDR_EL0 by expanding it to two registers with TPIDR2_EL0 being the second one. Existing programs that query the size of the register set will be able to observe the increased size of the register set. Programs that assume the register set is a single register will see no change. On systems that do not support SME, TPIDR2_EL0 will read as 0 and writes will be ignored; support for SME should be queried via hwcaps as normal.
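[Editor's note: for illustration, a hedged sketch of a tracer consuming the widened regset; 'pid' is a hypothetical traced thread, and the iov_len written back by the kernel distinguishes the old and new layouts:

	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/uio.h>
	#include <linux/elf.h>		/* NT_ARM_TLS */

	uint64_t tls[2] = { 0, 0 };	/* [0] TPIDR_EL0, [1] TPIDR2_EL0 */
	struct iovec iov = {
		.iov_base = tls,
		.iov_len = sizeof(tls),
	};

	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov) == 0) {
		/* iov.iov_len is 8 on kernels that only expose TPIDR_EL0,
		 * 16 once TPIDR2_EL0 is reported as the second register. */
	}
]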
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220829154921.837871-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index eb7c08dfb834..0e9764f73d61 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -666,10 +666,18 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, static int tls_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { + int ret; + if (target == current) tls_preserve_current_state(); - return membuf_store(&to, target->thread.uw.tp_value); + ret = membuf_store(&to, target->thread.uw.tp_value); + if (system_supports_tpidr2()) + ret = membuf_store(&to, target->thread.tpidr2_el0); + else + ret = membuf_zero(&to, sizeof(u64)); + + return ret; } static int tls_set(struct task_struct *target, const struct user_regset *regset, @@ -677,13 +685,20 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - unsigned long tls = target->thread.uw.tp_value; + unsigned long tls[2]; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + tls[0] = target->thread.uw.tp_value; + if (system_supports_sme()) + tls[1] = target->thread.tpidr2_el0; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); if (ret) return ret; - target->thread.uw.tp_value = tls; + target->thread.uw.tp_value = tls[0]; + if (system_supports_sme()) + target->thread.tpidr2_el0 = tls[1]; + return ret; } @@ -1392,7 +1407,7 @@ static const struct user_regset aarch64_regsets[] = { }, [REGSET_TLS] = { .core_note_type = NT_ARM_TLS, - .n = 1, + .n = 2, .size = sizeof(void *), .align = sizeof(void *), .regset_get = tls_get, -- cgit v1.2.3 From 3fb420f56cbf06b6ef8c39b886fed8f8f9aedee3 Mon Sep 17 00:00:00 2001 From: Li Huafei Date: Thu, 29 Sep 2022 17:41:32 +0800 Subject: arm64: module: Make plt_equals_entry() static Since commit 4e69ecf4da1e ("arm64/module: ftrace: deal with place relative nature of PLTs"), plt_equals_entry() is not used outside of module-plts.c, so make it static. 
Signed-off-by: Li Huafei Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220929094134.99512-2-lihuafei1@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/module.h | 1 - arch/arm64/kernel/module-plts.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 4e7fa2623896..28514b989a0b 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -58,7 +58,6 @@ static inline bool is_forbidden_offset_for_adrp(void *place) } struct plt_entry get_plt_entry(u64 dst, void *pc); -bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); static inline bool plt_entry_is_initialized(const struct plt_entry *e) { diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index a3d0494f25a9..5a0a8f552a61 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -37,7 +37,8 @@ struct plt_entry get_plt_entry(u64 dst, void *pc) return plt; } -bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b) +static bool plt_entries_equal(const struct plt_entry *a, + const struct plt_entry *b) { u64 p, q; -- cgit v1.2.3 From 5de2291605087b7a0cbc978c1a55dd4916f04e6e Mon Sep 17 00:00:00 2001 From: Li Huafei Date: Thu, 29 Sep 2022 17:41:33 +0800 Subject: arm64: module: Remove unused plt_entry_is_initialized() Since commit f1a54ae9af0d ("arm64: module/ftrace: intialize PLT at load time"), plt_entry_is_initialized() is no longer used, so remove it. Signed-off-by: Li Huafei Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220929094134.99512-3-lihuafei1@huawei.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/module.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 28514b989a0b..8096d30c5e39 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -59,9 +59,4 @@ struct plt_entry get_plt_entry(u64 dst, void *pc); -static inline bool plt_entry_is_initialized(const struct plt_entry *e) -{ - return e->adrp || e->add || e->br; -} - #endif /* __ASM_MODULE_H */ -- cgit v1.2.3 From 8cfb08575c6d4585f1ce0deeb189e5c824776b04 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 29 Sep 2022 14:45:25 +0100 Subject: arm64: ftrace: fix module PLTs with mcount Li Huafei reports that mcount-based ftrace with module PLTs was broken by commit: a6253579977e4c6f ("arm64: ftrace: consistently handle PLTs.") When module PLTs are used and a module is loaded sufficiently far away from the kernel, we'll create PLTs for any branches which are out-of-range. These are separate from the special ftrace trampoline PLTs, which the module PLT code doesn't directly manipulate. When mcount is in use this is a problem, as each mcount callsite in a module will be initialized to point to a module PLT, but since commit a6253579977e4c6f ftrace_make_nop() will assume that the callsite has been initialized to point to the special ftrace trampoline PLT, and ftrace_find_callable_addr() rejects other cases. This means that when ftrace tries to initialize a callsite via ftrace_make_nop(), the call to ftrace_find_callable_addr() will find that the `_mcount` stub is out-of-range and is not handled by the ftrace PLT, resulting in a splat: | ftrace_test: loading out-of-tree module taints kernel.
| ftrace: no module PLT for _mcount | ------------[ ftrace bug ]------------ | ftrace failed to modify | [] 0xffff800029180014 | actual: 44:00:00:94 | Initializing ftrace call sites | ftrace record flags: 2000000 | (0) | expected tramp: ffff80000802eb3c | ------------[ cut here ]------------ | WARNING: CPU: 3 PID: 157 at kernel/trace/ftrace.c:2120 ftrace_bug+0x94/0x270 | Modules linked in: | CPU: 3 PID: 157 Comm: insmod Tainted: G O 6.0.0-rc6-00151-gcd722513a189-dirty #22 | Hardware name: linux,dummy-virt (DT) | pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : ftrace_bug+0x94/0x270 | lr : ftrace_bug+0x21c/0x270 | sp : ffff80000b2bbaf0 | x29: ffff80000b2bbaf0 x28: 0000000000000000 x27: ffff0000c4d38000 | x26: 0000000000000001 x25: ffff800009d7e000 x24: ffff0000c4d86e00 | x23: 0000000002000000 x22: ffff80000a62b000 x21: ffff8000098ebea8 | x20: ffff0000c4d38000 x19: ffff80000aa24158 x18: ffffffffffffffff | x17: 0000000000000000 x16: 0a0d2d2d2d2d2d2d x15: ffff800009aa9118 | x14: 0000000000000000 x13: 6333626532303830 x12: 3030303866666666 | x11: 203a706d61727420 x10: 6465746365707865 x9 : 3362653230383030 | x8 : c0000000ffffefff x7 : 0000000000017fe8 x6 : 000000000000bff4 | x5 : 0000000000057fa8 x4 : 0000000000000000 x3 : 0000000000000001 | x2 : ad2cb14bb5438900 x1 : 0000000000000000 x0 : 0000000000000022 | Call trace: | ftrace_bug+0x94/0x270 | ftrace_process_locs+0x308/0x430 | ftrace_module_init+0x44/0x60 | load_module+0x15b4/0x1ce8 | __do_sys_init_module+0x1ec/0x238 | __arm64_sys_init_module+0x24/0x30 | invoke_syscall+0x54/0x118 | el0_svc_common.constprop.4+0x84/0x100 | do_el0_svc+0x3c/0xd0 | el0_svc+0x1c/0x50 | el0t_64_sync_handler+0x90/0xb8 | el0t_64_sync+0x15c/0x160 | ---[ end trace 0000000000000000 ]--- | ---------test_init----------- Fix this by reverting to the old behaviour of ignoring the old instruction when initialising an mcount callsite in a module, which was the behaviour prior to commit a6253579977e4c6f. Signed-off-by: Mark Rutland Fixes: a6253579977e ("arm64: ftrace: consistently handle PLTs.") Reported-by: Li Huafei Link: https://lore.kernel.org/linux-arm-kernel/20220929094134.99512-1-lihuafei1@huawei.com Cc: Ard Biesheuvel Cc: Will Deacon Link: https://lore.kernel.org/r/20220929134525.798593-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ftrace.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index ea5dc7c90f46..b49ba9a24bcc 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long pc = rec->ip; u32 old = 0, new; + new = aarch64_insn_gen_nop(); + + /* + * When using mcount, callsites in modules may have been initalized to + * call an arbitrary module PLT (which redirects to the _mcount stub) + * rather than the ftrace PLT we'll use at runtime (which redirects to + * the ftrace trampoline). We can ignore the old PLT when initializing + * the callsite. + * + * Note: 'mod' is only set at module load time. 
+ */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && + IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { + return aarch64_insn_patch_text_nosync((void *)pc, new); + } + if (!ftrace_find_callable_addr(rec, mod, &addr)) return -EINVAL; old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); - new = aarch64_insn_gen_nop(); return ftrace_modify_code(pc, old, new, true); } -- cgit v1.2.3 From d56f66d2bd4a7133e7d6a4c814c7582b4a7d1000 Mon Sep 17 00:00:00 2001 From: James Clark Date: Thu, 22 Sep 2022 15:24:00 +0100 Subject: arm64: defconfig: Add Coresight as module Add Coresight to defconfig so that build errors are caught. CONFIG_CORESIGHT_SOURCE_ETM4X is excluded because it depends on CONFIG_PID_IN_CONTEXTIDR which has a performance cost. Signed-off-by: James Clark Link: https://lore.kernel.org/r/20220922142400.478815-2-james.clark@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/configs/defconfig | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index ef3467092ded..d699933cab45 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1340,4 +1340,12 @@ CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +CONFIG_CORESIGHT_CTI=m CONFIG_MEMTEST=y -- cgit v1.2.3