From 2d987e64e8c756ffcbace4a598444297df28b8a1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:01 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64MMFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64MMFR0_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-5-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 12 ++++++------ arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/hyp/pgtable.c | 2 +- arch/arm64/kvm/reset.c | 12 ++++++------ 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index fa6e466ed57f..aac538c34f87 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -74,10 +74,10 @@ * - Non-context synchronizing exception entry and exit */ #define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ ) /* @@ -86,8 +86,8 @@ * - 16-bit ASID */ #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASID), ID_AA64MMFR0_EL1_ASID_16) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 99c8d8b73e70..823eb4d03956 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) u64 mdcr_set = 0; /* Trap Debug Communications Channel registers */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids)) mdcr_set |= MDCR_EL2_TDCC; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 2cb3867eb7c2..cdf8e76b0be1 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data { static bool kvm_phys_is_valid(u64 phys) { - return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX)); + return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX)); } static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 0e08fbe68715..5ae18472205a 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); + ID_AA64MMFR0_EL1_PARANGE_SHIFT); /* * IPA size 
beyond 48 bits could not be supported * on either 4K or 16K page size. Hence let's cap @@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void) * on the system. */ if (PAGE_SIZE != SZ_64K) - parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48); + parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48); /* * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at * Stage-2. If not, things will stop very quickly. */ - switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) { - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE: + switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) { + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE: kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n"); return -EINVAL; - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT: + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT: kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n"); break; - case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX: + case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX: kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n"); break; default: -- cgit v1.2.3 From a957c6be2b88564ed413a03a6009f11b1e5d5806 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:02 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64MMFR2_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64MMFR2_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-6-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/sysreg.h | 30 +++++++++--------- arch/arm64/kernel/cpufeature.c | 42 +++++++++++++------------- arch/arm64/kernel/head.S | 4 +-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 16 +++++----- 5 files changed, 47 insertions(+), 47 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index a6e7061d84e7..0d5ced93c740 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -612,7 +612,7 @@ alternative_endif .macro offset_ttbr1, ttbr, tmp #ifdef CONFIG_ARM64_VA_BITS_52 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 - and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) cbnz \tmp, .Lskipoffs_\@ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET .Lskipoffs_\@ : diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f9af77ab5f98..bb1f9ae5705f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -805,21 +805,21 @@ #define ID_AA64MMFR1_TIDCP1_IMP 1 /* id_aa64mmfr2 */ -#define ID_AA64MMFR2_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EVT_SHIFT 56 -#define ID_AA64MMFR2_BBM_SHIFT 52 -#define ID_AA64MMFR2_TTL_SHIFT 48 -#define ID_AA64MMFR2_FWB_SHIFT 40 -#define ID_AA64MMFR2_IDS_SHIFT 36 -#define ID_AA64MMFR2_AT_SHIFT 32 -#define ID_AA64MMFR2_ST_SHIFT 28 -#define ID_AA64MMFR2_NV_SHIFT 24 -#define ID_AA64MMFR2_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_LVA_SHIFT 16 -#define ID_AA64MMFR2_IESB_SHIFT 12 -#define ID_AA64MMFR2_LSM_SHIFT 8 -#define ID_AA64MMFR2_UAO_SHIFT 4 -#define ID_AA64MMFR2_CNP_SHIFT 0 +#define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 
+#define ID_AA64MMFR2_EL1_EVT_SHIFT 56 +#define ID_AA64MMFR2_EL1_BBM_SHIFT 52 +#define ID_AA64MMFR2_EL1_TTL_SHIFT 48 +#define ID_AA64MMFR2_EL1_FWB_SHIFT 40 +#define ID_AA64MMFR2_EL1_IDS_SHIFT 36 +#define ID_AA64MMFR2_EL1_AT_SHIFT 32 +#define ID_AA64MMFR2_EL1_ST_SHIFT 28 +#define ID_AA64MMFR2_EL1_NV_SHIFT 24 +#define ID_AA64MMFR2_EL1_CCIDX_SHIFT 20 +#define ID_AA64MMFR2_EL1_LVA_SHIFT 16 +#define ID_AA64MMFR2_EL1_IESB_SHIFT 12 +#define ID_AA64MMFR2_EL1_LSM_SHIFT 8 +#define ID_AA64MMFR2_EL1_UAO_SHIFT 4 +#define ID_AA64MMFR2_EL1_CNP_SHIFT 0 /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3f4512267fe4..eb50d52dac1f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -378,21 +378,21 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LVA_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), + 
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CNP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1571,7 +1571,7 @@ bool kaslr_requires_kpti(void) if (IS_ENABLED(CONFIG_ARM64_E0PD)) { u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); if (cpuid_feature_extract_unsigned_field(mmfr2, - ID_AA64MMFR2_E0PD_SHIFT)) + ID_AA64MMFR2_EL1_E0PD_SHIFT)) return false; } @@ -2303,7 +2303,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_STAGE2_FWB, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_FWB_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT, .field_width = 4, .min_field_value = 1, .matches = has_cpuid_feature, @@ -2314,7 +2314,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_ARMv8_4_TTL, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_TTL_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT, .field_width = 4, .min_field_value = 1, .matches = has_cpuid_feature, @@ -2380,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_useable_cnp, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_CNP_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_CNP_SHIFT, .field_width = 4, .min_field_value = 1, .cpu_enable = cpu_enable_cnp, @@ -2499,7 +2499,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, .field_width = 4, - .field_pos = ID_AA64MMFR2_E0PD_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT, .matches = has_cpuid_feature, .min_field_value = 1, .cpu_enable = cpu_enable_e0pd, @@ -2725,7 +2725,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bffb034d8f73..d040f57d3496 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry) */ #if VA_BITS > 48 mrs_s x0, SYS_ID_AA64MMFR2_EL1 - tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT + tst x0, #0xf << ID_AA64MMFR2_EL1_LVA_SHIFT mov x0, #VA_BITS mov x25, #VA_BITS_MIN csel x25, x25, x0, eq @@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva) b.ne 2f mrs_s x0, SYS_ID_AA64MMFR2_EL1 - and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + and x0, x0, #(0xf << ID_AA64MMFR2_EL1_LVA_SHIFT) cbnz x0, 2f update_early_cpu_boot_status \ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index aac538c34f87..3dad7b2079ee 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -120,14 +120,14 @@ * - E0PDx 
mechanism */ #define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \ ) /* -- cgit v1.2.3 From 55adc08d7e6433357f2b3b4fee248ae9da1fe2fa Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:03 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64PFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64PFR0_EL1 to follow the convention. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-7-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 12 ++--- arch/arm64/include/asm/el2_setup.h | 4 +- arch/arm64/include/asm/sysreg.h | 52 +++++++++---------- arch/arm64/kernel/cpufeature.c | 70 +++++++++++++------------- arch/arm64/kernel/hyp-stub.S | 2 +- arch/arm64/kernel/idreg-override.c | 2 +- arch/arm64/kernel/proton-pack.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 16 +++--- arch/arm64/kvm/hyp/nvhe/pkvm.c | 20 ++++---- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 8 +-- arch/arm64/kvm/sys_regs.c | 26 +++++----- drivers/irqchip/irq-gic-v4.c | 2 +- 13 files changed, 109 insertions(+), 109 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0d5ced93c740..48c7963abaf3 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -524,7 +524,7 @@ alternative_endif */ .macro reset_amuserenr_el0, tmpreg mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1 - ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4 + ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 cbz \tmpreg, .Lskip_\@ // Skip if no AMU present msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0 .Lskip_\@: diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 96ccf823f46e..8ba9f1c07432 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -603,21 +603,21 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) static inline bool id_aa64pfr0_32bit_el1(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT); - return val == ID_AA64PFR0_ELx_32BIT_64BIT; + return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT; } static inline bool id_aa64pfr0_32bit_el0(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT); - return val == ID_AA64PFR0_ELx_32BIT_64BIT; + return val 
== ID_AA64PFR0_EL1_ELx_32BIT_64BIT; } static inline bool id_aa64pfr0_sve(u64 pfr0) { - u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT); return val > 0; } @@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope) pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); csv2_val = cpuid_feature_extract_unsigned_field(pfr0, - ID_AA64PFR0_CSV2_SHIFT); + ID_AA64PFR0_EL1_CSV2_SHIFT); return csv2_val == 3; } diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index faad9e01e52b..a011c87ec6e3 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -97,7 +97,7 @@ /* GICv3 system register access */ .macro __init_el2_gicv3 mrs x0, id_aa64pfr0_el1 - ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 + ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4 cbz x0, .Lskip_gicv3_\@ mrs_s x0, SYS_ICC_SRE_EL2 @@ -162,7 +162,7 @@ msr_s SYS_HFGITR_EL2, xzr mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU - ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4 + ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 cbz x1, .Lskip_fgt_\@ msr_s SYS_HAFGRTR_EL2, xzr diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index bb1f9ae5705f..06f93aa9abb1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -686,32 +686,32 @@ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) /* id_aa64pfr0 */ -#define ID_AA64PFR0_CSV3_SHIFT 60 -#define ID_AA64PFR0_CSV2_SHIFT 56 -#define ID_AA64PFR0_DIT_SHIFT 48 -#define ID_AA64PFR0_AMU_SHIFT 44 -#define ID_AA64PFR0_MPAM_SHIFT 40 -#define ID_AA64PFR0_SEL2_SHIFT 36 -#define ID_AA64PFR0_SVE_SHIFT 32 -#define ID_AA64PFR0_RAS_SHIFT 28 -#define ID_AA64PFR0_GIC_SHIFT 24 -#define ID_AA64PFR0_ASIMD_SHIFT 20 -#define ID_AA64PFR0_FP_SHIFT 16 -#define ID_AA64PFR0_EL3_SHIFT 12 -#define ID_AA64PFR0_EL2_SHIFT 8 -#define ID_AA64PFR0_EL1_SHIFT 4 -#define ID_AA64PFR0_EL0_SHIFT 0 - -#define ID_AA64PFR0_AMU 0x1 -#define ID_AA64PFR0_SVE 0x1 -#define ID_AA64PFR0_RAS_V1 0x1 -#define ID_AA64PFR0_RAS_V1P1 0x2 -#define ID_AA64PFR0_FP_NI 0xf -#define ID_AA64PFR0_FP_SUPPORTED 0x0 -#define ID_AA64PFR0_ASIMD_NI 0xf -#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 -#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 +#define ID_AA64PFR0_EL1_CSV3_SHIFT 60 +#define ID_AA64PFR0_EL1_CSV2_SHIFT 56 +#define ID_AA64PFR0_EL1_DIT_SHIFT 48 +#define ID_AA64PFR0_EL1_AMU_SHIFT 44 +#define ID_AA64PFR0_EL1_MPAM_SHIFT 40 +#define ID_AA64PFR0_EL1_SEL2_SHIFT 36 +#define ID_AA64PFR0_EL1_SVE_SHIFT 32 +#define ID_AA64PFR0_EL1_RAS_SHIFT 28 +#define ID_AA64PFR0_EL1_GIC_SHIFT 24 +#define ID_AA64PFR0_EL1_ASIMD_SHIFT 20 +#define ID_AA64PFR0_EL1_FP_SHIFT 16 +#define ID_AA64PFR0_EL1_EL3_SHIFT 12 +#define ID_AA64PFR0_EL1_EL2_SHIFT 8 +#define ID_AA64PFR0_EL1_EL1_SHIFT 4 +#define ID_AA64PFR0_EL1_EL0_SHIFT 0 + +#define ID_AA64PFR0_EL1_AMU 0x1 +#define ID_AA64PFR0_EL1_SVE 0x1 +#define ID_AA64PFR0_EL1_RAS_V1 0x1 +#define ID_AA64PFR0_EL1_RAS_V1P1 0x2 +#define ID_AA64PFR0_EL1_FP_NI 0xf +#define ID_AA64PFR0_EL1_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_ASIMD_NI 0xf +#define ID_AA64PFR0_EL1_ASIMD_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64pfr1 */ #define ID_AA64PFR1_SME_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index eb50d52dac1f..3bda767af32d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ 
b/arch/arm64/kernel/cpufeature.c @@ -243,22 +243,22 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, ID_AA64PFR0_EL1_ASIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY), ARM64_FTR_END, }; @@ -1492,7 +1492,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); return cpuid_feature_extract_signed_field(pfr0, - ID_AA64PFR0_FP_SHIFT) < 0; + ID_AA64PFR0_EL1_FP_SHIFT) < 0; } static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, @@ -2093,7 +2093,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { 
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_GIC_SHIFT, + .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2168,9 +2168,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_32bit_el0, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_EL0_SHIFT, + .field_pos = ID_AA64PFR0_EL1_EL0_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT, + .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, }, #ifdef CONFIG_KVM { @@ -2180,9 +2180,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_EL1_SHIFT, + .field_pos = ID_AA64PFR0_EL1_EL1_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT, + .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, }, { .desc = "Protected KVM", @@ -2201,7 +2201,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { * more details. */ .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_CSV3_SHIFT, + .field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT, .field_width = 4, .min_field_value = 1, .matches = unmap_kernel_at_el0, @@ -2244,9 +2244,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_SVE, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_SVE_SHIFT, + .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_SVE, + .min_field_value = ID_AA64PFR0_EL1_SVE, .matches = has_cpuid_feature, .cpu_enable = sve_kernel_enable, }, @@ -2259,9 +2259,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_RAS_SHIFT, + .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_RAS_V1, + .min_field_value = ID_AA64PFR0_EL1_RAS_V1, .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ @@ -2278,9 +2278,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_amu, .sys_reg = SYS_ID_AA64PFR0_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR0_AMU_SHIFT, + .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_AMU, + .min_field_value = ID_AA64PFR0_EL1_AMU, .cpu_enable = cpu_amu_enable, }, #endif /* CONFIG_ARM64_AMU_EXTN */ @@ -2485,7 +2485,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_GIC_SHIFT, + .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2708,11 +2708,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), - 
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), @@ -2727,7 +2727,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 12c7fad02ae5..f0644e945117 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync) SYM_CODE_END(elx_sync) SYM_CODE_START_LOCAL(__finalise_el2) - check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve + check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve .Linit_sve: /* SVE register access */ mrs x0, cptr_el2 // Disable SVE traps diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 1b0542c69738..7b90a9b4cc0a 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = { .name = "id_aa64pfr0", .override = &id_aa64pfr0_override, .fields = { - FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter), + FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter), {} }, }; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 40be3a7c2c53..6ee586b4e235 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void) /* If the CPU has CSV2 set, we're safe */ pfr0 = 
read_cpuid(ID_AA64PFR0_EL1); - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT)) return SPECTRE_UNAFFECTED; /* Alternatively, we have a list of unaffected CPUs */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 3dad7b2079ee..d94fb45a0e34 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -35,9 +35,9 @@ * - Data Independent Timing */ #define PVM_ID_AA64PFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \ ) /* @@ -49,11 +49,11 @@ * Supported by KVM */ #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_V1) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 823eb4d03956..d1fa03e2a449 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Protected KVM does not support AArch32 guests. */ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY); /* * Linux guests assume support for floating-point and Advanced SIMD. Do * not change the trapping behavior for these from the KVM default. 
*/ - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), PVM_ID_AA64PFR0_ALLOW)); - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD), PVM_ID_AA64PFR0_ALLOW)); /* Trap RAS unless all current versions are supported */ - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) < - ID_AA64PFR0_RAS_V1P1) { + if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < + ID_AA64PFR0_EL1_RAS_V1P1) { hcr_set |= HCR_TERR | HCR_TEA; hcr_clear |= HCR_FIEN; } /* Trap AMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { hcr_clear |= HCR_AMVOFFEN; cptr_set |= CPTR_EL2_TAM; } /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) cptr_set |= CPTR_EL2_TZ; vcpu->arch.hcr_el2 |= hcr_set; diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index e20fa4475dac..2ebf93336437 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); /* Spectre and Meltdown mitigation in KVM */ - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)kvm->arch.pfr0_csv2); - set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), + set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)kvm->arch.pfr0_csv3); return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; @@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu, * No support for AArch32 guests, therefore, pKVM has no sanitized copy * of AArch32 feature id registers. 
*/ - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY); + BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), + PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY); return pvm_access_raz_wi(vcpu, p, r); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3234f50b8c4b..cf1fc616e00d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1077,15 +1077,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, switch (id) { case SYS_ID_AA64PFR0_EL1: if (!vcpu_has_sve(vcpu)) - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); if (kvm_vgic_global_state.type == VGIC_V3) { - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1); } break; case SYS_ID_AA64PFR1_EL1: @@ -1196,21 +1196,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, * it doesn't promise more than what is actually provided (the * guest could otherwise be covered in ectoplasmic residue). 
*/ - csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT); + csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT); if (csv2 > 1 || (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED)) return -EINVAL; /* Same thing for CSV3 */ - csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT); + csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT); if (csv3 > 1 || (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED)) return -EINVAL; /* We can only differ with CSV[23], and anything else is an error */ val ^= read_id_reg(vcpu, rd, false); - val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) | - (0xFUL << ID_AA64PFR0_CSV3_SHIFT)); + val &= ~((0xFUL << ID_AA64PFR0_EL1_CSV2_SHIFT) | + (0xFUL << ID_AA64PFR0_EL1_CSV3_SHIFT)); if (val) return -EINVAL; @@ -1825,7 +1825,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, } else { u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); + u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 4ea71b28f9f5..a6277dea4c7a 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void) { unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT); return fld >= 0x3; } -- cgit v1.2.3 From 6ca2b9ca459a598b78265477d288fdec8a0fdd6d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:04 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64PFR1_EL1 constant names Our standard is to include the _EL1 in the constant names for registers but we did not do that for ID_AA64PFR1_EL1, update to do so in preparation for conversion to automatic generation. No functional change. 
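As an illustrative sketch of what the convention buys us (not taken from this patch; it only assumes ARM64_FEATURE_MASK() and FIELD_GET() behave as the hunks in this series use them, building and extracting a 4-bit mask from the corresponding _SHIFT define), a caller of the renamed constants looks like:

	/* Sketch: check MTE support in a sanitised ID_AA64PFR1_EL1 value. */
	#include <linux/bitfield.h>
	#include <asm/sysreg.h>

	static bool pfr1_has_mte(u64 pfr1)
	{
		/*
		 * ARM64_FEATURE_MASK(x) token-pastes x##_SHIFT to form the
		 * field mask, so the field constant must carry the full
		 * register name for the new generated defines to resolve.
		 */
		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), pfr1) >=
		       ID_AA64PFR1_EL1_MTE;
	}

This mirrors the id_aa64pfr1_mte() helper touched below: the same identifier stem serves as both the field prefix (for _SHIFT) and the minimum feature value, which is why the rename must be applied to both consistently.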
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-8-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 ++-- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/sysreg.h | 34 ++++++++++----------- arch/arm64/kernel/cpufeature.c | 42 +++++++++++++------------- arch/arm64/kernel/hyp-stub.S | 2 +- arch/arm64/kernel/idreg-override.c | 6 ++-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 4 +-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2 +- arch/arm64/kvm/sys_regs.c | 4 +-- arch/arm64/mm/mmu.c | 2 +- arch/arm64/mm/proc.S | 4 +-- 12 files changed, 55 insertions(+), 55 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8ba9f1c07432..214325a7f627 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -624,16 +624,16 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) static inline bool id_aa64pfr1_sme(u64 pfr1) { - u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT); return val > 0; } static inline bool id_aa64pfr1_mte(u64 pfr1) { - u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT); + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); - return val >= ID_AA64PFR1_MTE; + return val >= ID_AA64PFR1_EL1_MTE; } void __init setup_cpu_features(void); diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index a011c87ec6e3..80ef55b66196 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -149,7 +149,7 @@ mov x0, xzr mrs x1, id_aa64pfr1_el1 - ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 + ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4 cbz x1, .Lset_fgt_\@ /* Disable nVHE traps of TPIDR2 and SMPRI */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06f93aa9abb1..e72bab4452e9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -714,23 +714,23 @@ #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64pfr1 */ -#define ID_AA64PFR1_SME_SHIFT 24 -#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_RASFRAC_SHIFT 12 -#define ID_AA64PFR1_MTE_SHIFT 8 -#define ID_AA64PFR1_SSBS_SHIFT 4 -#define ID_AA64PFR1_BT_SHIFT 0 - -#define ID_AA64PFR1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_BT_BTI 0x1 -#define ID_AA64PFR1_SME 1 - -#define ID_AA64PFR1_MTE_NI 0x0 -#define ID_AA64PFR1_MTE_EL0 0x1 -#define ID_AA64PFR1_MTE 0x2 -#define ID_AA64PFR1_MTE_ASYMM 0x3 +#define ID_AA64PFR1_EL1_SME_SHIFT 24 +#define ID_AA64PFR1_EL1_MPAMFRAC_SHIFT 16 +#define ID_AA64PFR1_EL1_RASFRAC_SHIFT 12 +#define ID_AA64PFR1_EL1_MTE_SHIFT 8 +#define ID_AA64PFR1_EL1_SSBS_SHIFT 4 +#define ID_AA64PFR1_EL1_BT_SHIFT 0 + +#define ID_AA64PFR1_EL1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS 2 +#define ID_AA64PFR1_EL1_BT_BTI 0x1 +#define ID_AA64PFR1_EL1_SME 1 + +#define ID_AA64PFR1_EL1_MTE_NI 0x0 +#define ID_AA64PFR1_EL1_MTE_EL0 0x1 +#define ID_AA64PFR1_EL1_MTE 0x2 +#define ID_AA64PFR1_EL1_MTE_ASYMM 0x3 /* id_aa64mmfr0 */ #define ID_AA64MMFR0_EL1_ECV_SHIFT 60 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3bda767af32d..2e19cbdab50a 
100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -264,14 +264,14 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAMFRAC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RASFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_PSTATE_NI), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2367,10 +2367,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_SSBS_SHIFT, + .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, - .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, + .min_field_value = ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY, }, #ifdef CONFIG_ARM64_CNP { @@ -2528,9 +2528,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .cpu_enable = bti_enable, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_BT_SHIFT, + .field_pos = ID_AA64PFR1_EL1_BT_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_BT_BTI, + .min_field_value = ID_AA64PFR1_EL1_BT_BTI, .sign = FTR_UNSIGNED, }, #endif @@ -2541,9 +2541,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_MTE_SHIFT, + .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_MTE, + .min_field_value = ID_AA64PFR1_EL1_MTE, .sign = FTR_UNSIGNED, .cpu_enable = cpu_enable_mte, }, @@ -2553,9 +2553,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, - .field_pos = ID_AA64PFR1_MTE_SHIFT, + .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_MTE_ASYMM, + .min_field_value = ID_AA64PFR1_EL1_MTE_ASYMM, .sign = FTR_UNSIGNED, }, #endif /* CONFIG_ARM64_MTE */ @@ -2577,9 +2577,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_SME, .sys_reg = SYS_ID_AA64PFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64PFR1_SME_SHIFT, + .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR1_SME, + .min_field_value = ID_AA64PFR1_EL1_SME, .matches = has_cpuid_feature, .cpu_enable = sme_kernel_enable, }, @@ -2739,24 
+2739,24 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), #endif - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_BTI - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), #endif #ifdef CONFIG_ARM64_PTR_AUTH HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), #endif #ifdef CONFIG_ARM64_MTE - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index f0644e945117..bce1f5f6b8c9 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) msr_s SYS_ZCR_EL2, x1 // length for EL1. 
.Lskip_sve: - check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme + check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme .Linit_sme: /* SME register access and priority mapping */ mrs x0, cptr_el2 // Disable SME traps diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 7b90a9b4cc0a..8c474915a11d 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = { .name = "id_aa64pfr1", .override = &id_aa64pfr1_override, .fields = { - FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ), - FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL), - FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter), + FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ), + FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL), + FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter), {} }, }; diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index d94fb45a0e34..fad5406fc71a 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -62,8 +62,8 @@ * - Speculative Store Bypassing */ #define PVM_ID_AA64PFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \ - ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \ + ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \ + ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index d1fa03e2a449..05301d3b3fc2 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) u64 hcr_clear = 0; /* Memory Tagging: Trap and Treat as Untagged if not supported. 
*/ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) { hcr_set |= HCR_TID5; hcr_clear |= HCR_DCT | HCR_ATA; } diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 2ebf93336437..0f9ac25afdf4 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; if (!kvm_has_mte(kvm)) - allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); return id_aa64pfr1_el1_sys_val & allow_mask; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index cf1fc616e00d..ff4405a6ea25 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1090,9 +1090,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64PFR1_EL1: if (!kvm_has_mte(vcpu->kvm)) - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e7ad44585f40..5810eddfb48e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void) pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1); return cpuid_feature_extract_unsigned_field(pfr1, - ID_AA64PFR1_BT_SHIFT); + ID_AA64PFR1_EL1_BT_SHIFT); } /* diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7837a69524c5..15539da36bc3 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup) * (ID_AA64PFR1_EL1[11:8] > 1). */ mrs x10, ID_AA64PFR1_EL1 - ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 - cmp x10, #ID_AA64PFR1_MTE + ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4 + cmp x10, #ID_AA64PFR1_EL1_MTE b.lt 1f /* Normal Tagged memory type at the corresponding MAIR index */ -- cgit v1.2.3 From ed7c138d6f82a9e75f27c9ff43262ba8158533b0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:05 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64MMFR0_EL1.BigEnd For some reason we refer to ID_AA64MMFR0_EL1.BigEnd as BIGENDEL. Remove the EL from the name, bringing the naming into sync with DDI0487H.a. Due to the large amount of MixedCase in this register which isn't really consistent with either the kernel style or the majority of the architecture the use of upper case is preserved. No functional changes. 
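For reference, the extraction the renamed define feeds into is conceptually the following (a sketch, assuming the usual 4-bit unsigned ID-register field handling of the cpufeature helpers):

	/*
	 * Illustrative equivalent of
	 * cpuid_feature_extract_unsigned_field(mmfr0,
	 *                                      ID_AA64MMFR0_EL1_BIGEND_SHIFT):
	 */
	static inline unsigned int extract_id_field(u64 reg, unsigned int shift)
	{
		return (reg >> shift) & 0xf;	/* unsigned 4-bit ID field */
	}

A BigEnd value of 0x1 indicates mixed-endian support, which is exactly the comparison made in system_supports_mixed_endian() in the hunk below.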
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-9-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 214325a7f627..d7b96dc9364b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -597,7 +597,7 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) { - return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT) == 0x1 || + return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 || cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1; } @@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void) mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); val = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_EL1_BIGENDEL_SHIFT); + ID_AA64MMFR0_EL1_BIGEND_SHIFT); return val == 0x1; } diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e72bab4452e9..f1430c77911a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -744,7 +744,7 @@ #define ID_AA64MMFR0_EL1_TGRAN16_SHIFT 20 #define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 #define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_EL1_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 #define ID_AA64MMFR0_EL1_ASID_SHIFT 4 #define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2e19cbdab50a..def03583523b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -351,7 +351,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index fad5406fc71a..0ece26707fc0 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -74,7 +74,7 @@ * - Non-context synchronizing exception entry and exit */ #define PVM_ID_AA64MMFR0_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ -- cgit v1.2.3 From 07d7d848b96b57eccafeafad023d02c7f627d494 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:06 +0100 Subject: arm64/sysreg: Standardise naming of ID_AA64MMFR0_EL1.ASIDBits For some reason 
we refer to ID_AA64MMFR0_EL1.ASIDBits as ASID. Add BITS into the name, bringing the naming into sync with DDI0487H.a. Due to the large amount of MixedCase in this register which isn't really consistent with either the kernel style or the majority of the architecture the use of upper case is preserved. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-10-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 6 +++--- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- arch/arm64/mm/context.c | 6 +++--- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f1430c77911a..b6cd9996e12b 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -745,11 +745,11 @@ #define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT 16 #define ID_AA64MMFR0_EL1_SNSMEM_SHIFT 12 #define ID_AA64MMFR0_EL1_BIGEND_SHIFT 8 -#define ID_AA64MMFR0_EL1_ASID_SHIFT 4 +#define ID_AA64MMFR0_EL1_ASIDBITS_SHIFT 4 #define ID_AA64MMFR0_EL1_PARANGE_SHIFT 0 -#define ID_AA64MMFR0_EL1_ASID_8 0x0 -#define ID_AA64MMFR0_EL1_ASID_16 0x2 +#define ID_AA64MMFR0_EL1_ASIDBITS_8 0x0 +#define ID_AA64MMFR0_EL1_ASIDBITS_16 0x2 #define ID_AA64MMFR0_EL1_TGRAN4_NI 0xf #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index def03583523b..ba44f67c5544 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -352,7 +352,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0), /* * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0ece26707fc0..0c2e474d0c9e 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -87,7 +87,7 @@ */ #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASID), ID_AA64MMFR0_EL1_ASID_16) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \ ) /* diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8f38a5452d05..e1e0dca01839 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void) { u32 asid; int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), - ID_AA64MMFR0_EL1_ASID_SHIFT); + ID_AA64MMFR0_EL1_ASIDBITS_SHIFT); switch (fld) { default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); fallthrough; - case ID_AA64MMFR0_EL1_ASID_8: + case ID_AA64MMFR0_EL1_ASIDBITS_8: asid = 8; break; - case ID_AA64MMFR0_EL1_ASID_16: + case 
ID_AA64MMFR0_EL1_ASIDBITS_16: asid = 16; } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index da67a75cdaad..5968a568aae2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -431,7 +431,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) return false; /* We can support bigger ASIDs than the CPU, but not smaller */ - fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASID_SHIFT); + fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT); asid_bits = fld ? 16 : 8; if (smmu->asid_bits < asid_bits) return false; -- cgit v1.2.3 From 6fcd019359028f3c6477508b329d69e27f41d895 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Mon, 5 Sep 2022 23:54:07 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64MMFR1_EL1 fields In preparation for converting the ID_AA64MMFR1_EL1 system register defines to automatic generation, rename them to follow the conventions used by other automatically generated registers: * Add _EL1 in the register name. * Rename fields to match the names in the ARM ARM: * LOR -> LO * HPD -> HPDS * VHE -> VH * HADBS -> HAFDBS * SPECSEI -> SpecSEI * VMIDBITS -> VMIDBits There should be no functional change as a result of this patch. Signed-off-by: Kristina Martsenko Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-11-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 8 +++--- arch/arm64/include/asm/el2_setup.h | 2 +- arch/arm64/include/asm/sysreg.h | 40 +++++++++++++------------- arch/arm64/kernel/cpufeature.c | 36 +++++++++++------------ arch/arm64/kernel/hyp-stub.S | 4 +-- arch/arm64/kernel/idreg-override.c | 2 +- arch/arm64/kernel/proton-pack.c | 2 +- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 12 ++++---- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/sys_regs.c | 2 +- 10 files changed, 55 insertions(+), 55 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index d7b96dc9364b..5fc43f7f3ed6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void) mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_HADBS_SHIFT); + ID_AA64MMFR1_EL1_HAFDBS_SHIFT); } static inline bool cpu_has_pan(void) { u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_PAN_SHIFT); + ID_AA64MMFR1_EL1_PAN_SHIFT); } #ifdef CONFIG_ARM64_AMU_EXTN @@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) int vmid_bits; vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_VMIDBITS_SHIFT); - if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16) + ID_AA64MMFR1_EL1_VMIDBits_SHIFT); + if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16) return 16; /* diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 80ef55b66196..b6e9bea7c9ec 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -83,7 +83,7 @@ /* LORegions */ .macro __init_el2_lor mrs x1, id_aa64mmfr1_el1 - ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 + ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4 cbz x0, .Lskip_lor_\@ msr_s SYS_LORC_EL1, xzr .Lskip_lor_\@: diff --git a/arch/arm64/include/asm/sysreg.h 
b/arch/arm64/include/asm/sysreg.h index b6cd9996e12b..410b628fbb67 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -783,26 +783,26 @@ #endif /* id_aa64mmfr1 */ -#define ID_AA64MMFR1_ECBHB_SHIFT 60 -#define ID_AA64MMFR1_TIDCP1_SHIFT 52 -#define ID_AA64MMFR1_HCX_SHIFT 40 -#define ID_AA64MMFR1_AFP_SHIFT 44 -#define ID_AA64MMFR1_ETS_SHIFT 36 -#define ID_AA64MMFR1_TWED_SHIFT 32 -#define ID_AA64MMFR1_XNX_SHIFT 28 -#define ID_AA64MMFR1_SPECSEI_SHIFT 24 -#define ID_AA64MMFR1_PAN_SHIFT 20 -#define ID_AA64MMFR1_LOR_SHIFT 16 -#define ID_AA64MMFR1_HPD_SHIFT 12 -#define ID_AA64MMFR1_VHE_SHIFT 8 -#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 -#define ID_AA64MMFR1_HADBS_SHIFT 0 - -#define ID_AA64MMFR1_VMIDBITS_8 0 -#define ID_AA64MMFR1_VMIDBITS_16 2 - -#define ID_AA64MMFR1_TIDCP1_NI 0 -#define ID_AA64MMFR1_TIDCP1_IMP 1 +#define ID_AA64MMFR1_EL1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_EL1_TIDCP1_SHIFT 52 +#define ID_AA64MMFR1_EL1_HCX_SHIFT 40 +#define ID_AA64MMFR1_EL1_AFP_SHIFT 44 +#define ID_AA64MMFR1_EL1_ETS_SHIFT 36 +#define ID_AA64MMFR1_EL1_TWED_SHIFT 32 +#define ID_AA64MMFR1_EL1_XNX_SHIFT 28 +#define ID_AA64MMFR1_EL1_SpecSEI_SHIFT 24 +#define ID_AA64MMFR1_EL1_PAN_SHIFT 20 +#define ID_AA64MMFR1_EL1_LO_SHIFT 16 +#define ID_AA64MMFR1_EL1_HPDS_SHIFT 12 +#define ID_AA64MMFR1_EL1_VH_SHIFT 8 +#define ID_AA64MMFR1_EL1_VMIDBits_SHIFT 4 +#define ID_AA64MMFR1_EL1_HAFDBS_SHIFT 0 + +#define ID_AA64MMFR1_EL1_VMIDBits_8 0 +#define ID_AA64MMFR1_EL1_VMIDBits_16 2 + +#define ID_AA64MMFR1_EL1_TIDCP1_NI 0 +#define ID_AA64MMFR1_EL1_TIDCP1_IMP 1 /* id_aa64mmfr2 */ #define ID_AA64MMFR2_EL1_E0PD_SHIFT 60 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ba44f67c5544..534819afadd5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -362,18 +362,18 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, 
FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2116,7 +2116,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR1_EL1, - .field_pos = ID_AA64MMFR1_PAN_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 1, @@ -2130,7 +2130,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64MMFR1_EL1, - .field_pos = ID_AA64MMFR1_PAN_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, .field_width = 4, .sign = FTR_UNSIGNED, .min_field_value = 3, @@ -2344,7 +2344,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HW_DBM, .sys_reg = SYS_ID_AA64MMFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR1_HADBS_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT, .field_width = 4, .min_field_value = 2, .matches = has_hw_dbm, @@ -2614,9 +2614,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .sys_reg = SYS_ID_AA64MMFR1_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR1_TIDCP1_SHIFT, + .field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT, .field_width = 4, - .min_field_value = ID_AA64MMFR1_TIDCP1_IMP, + .min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP, .matches = has_cpuid_feature, .cpu_enable = cpu_trap_el0_impdef, }, @@ -2752,7 +2752,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), - HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index bce1f5f6b8c9..2ee18c860f2a 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? 
- ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4 + ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 cbz x1, .Lskip_sme mrs_s x1, SYS_HCRX_EL2 @@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) tbnz x1, #0, 1f // Needs to be VHE capable, obviously - check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f + check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f 1: mov_q x0, HVC_STUB_ERR eret diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 8c474915a11d..95133765ed29 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = { .name = "id_aa64mmfr1", .override = &id_aa64mmfr1_override, .fields = { - FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter), + FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter), {} }, }; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 6ee586b4e235..fe3bc4c1c5ac 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope) mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, - ID_AA64MMFR1_ECBHB_SHIFT); + ID_AA64MMFR1_EL1_ECBHB_SHIFT); } bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0c2e474d0c9e..1653299ff8f8 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -100,12 +100,12 @@ * - Enhanced Translation Synchronization */ #define PVM_ID_AA64MMFR1_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \ - ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \ ) /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 05301d3b3fc2..b92ecdd6bdab 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) u64 hcr_set = 0; /* Trap LOR */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids)) hcr_set |= HCR_TLOR; vcpu->arch.hcr_el2 |= hcr_set; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ff4405a6ea25..fa61793467a7 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); u32 sr = reg_to_encoding(r); - if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { + if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) { kvm_inject_undefined(vcpu); return false; } -- cgit v1.2.3 From ca951862ad3e6fa33d8ba8220b80f3fdc496821c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:09 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64MMFR2_EL1.CnP The kernel refers to ID_AA64MMFR2_EL1.CnP as CNP. 
In preparation for automatic generation of defines for the system registers bring the naming used by the kernel in sync with that of DDI0487H.a. No functional change. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-13-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 2 +- arch/arm64/kernel/cpufeature.c | 4 ++-- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c80f1f7a10f1..7795a043a8ff 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -819,7 +819,7 @@ #define ID_AA64MMFR2_EL1_IESB_SHIFT 12 #define ID_AA64MMFR2_EL1_LSM_SHIFT 8 #define ID_AA64MMFR2_EL1_UAO_SHIFT 4 -#define ID_AA64MMFR2_EL1_CNP_SHIFT 0 +#define ID_AA64MMFR2_EL1_CnP_SHIFT 0 /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f927b4451613..2de9b28ee84d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -392,7 +392,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CNP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2380,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_useable_cnp, .sys_reg = SYS_ID_AA64MMFR2_EL1, .sign = FTR_UNSIGNED, - .field_pos = ID_AA64MMFR2_EL1_CNP_SHIFT, + .field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT, .field_width = 4, .min_field_value = 1, .cpu_enable = cpu_enable_cnp, diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 1653299ff8f8..0ba290e1a791 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -120,7 +120,7 @@ * - E0PDx mechanism */ #define PVM_ID_AA64MMFR2_ALLOW (\ - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CNP) | \ + ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ -- cgit v1.2.3 From 4f8456c3199dab2436dab1f8ec17cba853fa1060 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:10 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64PFR0_EL1 constants We generally refer to the baseline feature implemented as _IMP so in preparation for automatic generation of register defines update those for ID_AA64PFR0_EL1 to reflect this. In the case of ASIMD we don't actually use the define so just remove it. No functional changes. 
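To make the rename concrete, a minimal sketch of how such a constant is typically consumed, assuming only the post-rename names from this series and the existing cpufeature helpers; the function name example_cpu_has_sve() is invented for the illustration and is not part of any patch here:

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Illustrative sketch only, not part of this patch series. */
static bool example_cpu_has_sve(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* The feature is implemented when the field reaches _IMP. */
	return cpuid_feature_extract_unsigned_field(pfr0,
			ID_AA64PFR0_EL1_SVE_SHIFT) >= ID_AA64PFR0_EL1_SVE_IMP;
}

This is the same .field_pos/.min_field_value pairing (ID_AA64PFR0_EL1_SVE_SHIFT with ID_AA64PFR0_EL1_SVE_IMP) that the cpufeature.c hunks below update.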
Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-14-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 9 ++++----- arch/arm64/kernel/cpufeature.c | 8 ++++---- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 7795a043a8ff..c2fffb863f08 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -702,14 +702,13 @@ #define ID_AA64PFR0_EL1_EL1_SHIFT 4 #define ID_AA64PFR0_EL1_EL0_SHIFT 0 -#define ID_AA64PFR0_EL1_AMU 0x1 -#define ID_AA64PFR0_EL1_SVE 0x1 -#define ID_AA64PFR0_EL1_RAS_V1 0x1 +#define ID_AA64PFR0_EL1_AMU_IMP 0x1 +#define ID_AA64PFR0_EL1_SVE_IMP 0x1 +#define ID_AA64PFR0_EL1_RAS_IMP 0x1 #define ID_AA64PFR0_EL1_RAS_V1P1 0x2 #define ID_AA64PFR0_EL1_FP_NI 0xf -#define ID_AA64PFR0_EL1_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_FP_IMP 0x0 #define ID_AA64PFR0_EL1_ASIMD_NI 0xf -#define ID_AA64PFR0_EL1_ASIMD_SUPPORTED 0x0 #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2de9b28ee84d..43afa9a1cd73 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2246,7 +2246,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_SVE, + .min_field_value = ID_AA64PFR0_EL1_SVE_IMP, .matches = has_cpuid_feature, .cpu_enable = sve_kernel_enable, }, @@ -2261,7 +2261,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_RAS_V1, + .min_field_value = ID_AA64PFR0_EL1_RAS_IMP, .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ @@ -2280,7 +2280,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT, .field_width = 4, - .min_field_value = ID_AA64PFR0_EL1_AMU, + .min_field_value = ID_AA64PFR0_EL1_AMU_IMP, .cpu_enable = cpu_amu_enable, }, #endif /* CONFIG_ARM64_AMU_EXTN */ @@ -2727,7 +2727,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 0ba290e1a791..6200d53600ba 100644 --- 
a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -53,7 +53,7 @@ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \ - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_V1) \ + FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \ ) /* -- cgit v1.2.3 From 5620b4b0371569b9ba65b756cd6d5fc6af47ddd6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Sep 2022 23:54:11 +0100 Subject: arm64/sysreg: Standardise naming for ID_AA64PFR0_EL1.AdvSIMD constants The architecture refers to the register field identifying advanced SIMD as AdvSIMD but the kernel refers to it as ASIMD. Use the architecture's naming. No functional changes. Signed-off-by: Mark Brown Reviewed-by: Kristina Martsenko Link: https://lore.kernel.org/r/20220905225425.1871461-15-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++-- arch/arm64/kernel/cpufeature.c | 6 +++--- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 2 +- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c2fffb863f08..78087b1a3ca4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -695,7 +695,7 @@ #define ID_AA64PFR0_EL1_SVE_SHIFT 32 #define ID_AA64PFR0_EL1_RAS_SHIFT 28 #define ID_AA64PFR0_EL1_GIC_SHIFT 24 -#define ID_AA64PFR0_EL1_ASIMD_SHIFT 20 +#define ID_AA64PFR0_EL1_AdvSIMD_SHIFT 20 #define ID_AA64PFR0_EL1_FP_SHIFT 16 #define ID_AA64PFR0_EL1_EL3_SHIFT 12 #define ID_AA64PFR0_EL1_EL2_SHIFT 8 @@ -708,7 +708,7 @@ #define ID_AA64PFR0_EL1_RAS_V1P1 0x2 #define ID_AA64PFR0_EL1_FP_NI 0xf #define ID_AA64PFR0_EL1_FP_IMP 0x0 -#define ID_AA64PFR0_EL1_ASIMD_NI 0xf +#define ID_AA64PFR0_EL1_AdvSIMD_NI 0xf #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 43afa9a1cd73..1610b35229e4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -253,7 +253,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), - S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, ID_AA64PFR0_EL1_ASIMD_NI), + S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), @@ -2710,8 +2710,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), 
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 6200d53600ba..07edfc7524c9 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -36,7 +36,7 @@ */ #define PVM_ID_AA64PFR0_ALLOW (\ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD) | \ + ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \ ) diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index b92ecdd6bdab..fc3e32709ba2 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -31,7 +31,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) */ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), PVM_ID_AA64PFR0_ALLOW)); - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD), + BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), PVM_ID_AA64PFR0_ALLOW)); /* Trap RAS unless all current versions are supported */ -- cgit v1.2.3 From bc8d75212d735ac9624c1d3532ad371ec9e570ae Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:39 +0100 Subject: arm64: stacktrace: simplify unwind_next_common() Currently unwind_next_common() takes a pointer to a stack_info which is only ever used within unwind_next_common(). Make it a local variable and simplify callers. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. 
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 12 ++++++------ arch/arm64/kernel/stacktrace.c | 3 +-- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 4 +--- arch/arm64/kvm/stacktrace.c | 4 +--- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index b8b20ef5aa5d..0fbae7c78b70 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -146,27 +146,27 @@ typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, struct stack_info *info); static inline int unwind_next_common(struct unwind_state *state, - struct stack_info *info, on_accessible_stack_fn accessible, stack_trace_translate_fp_fn translate_fp) { + struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; struct task_struct *tsk = state->task; if (fp & 0x7) return -EINVAL; - if (!accessible(tsk, fp, 16, info)) + if (!accessible(tsk, fp, 16, &info)) return -EINVAL; - if (test_bit(info->type, state->stacks_done)) + if (test_bit(info.type, state->stacks_done)) return -EINVAL; /* * If fp is not from the current address space perform the necessary * translation before dereferencing it to get the next fp. */ - if (translate_fp && !translate_fp(&kern_fp, info->type)) + if (translate_fp && !translate_fp(&kern_fp, info.type)) return -EINVAL; /* @@ -183,7 +183,7 @@ static inline int unwind_next_common(struct unwind_state *state, * stack to another, it's never valid to unwind back to that first * stack. */ - if (info->type == state->prev_type) { + if (info.type == state->prev_type) { if (fp <= state->prev_fp) return -EINVAL; } else { @@ -197,7 +197,7 @@ static inline int unwind_next_common(struct unwind_state *state, state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); state->prev_fp = fp; - state->prev_type = info->type; + state->prev_type = info.type; return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index ce190ee18a20..056fb045d0e0 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -103,14 +103,13 @@ static int notrace unwind_next(struct unwind_state *state) { struct task_struct *tsk = state->task; unsigned long fp = state->fp; - struct stack_info info; int err; /* Final frame; nothing to unwind */ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_common(state, &info, on_accessible_stack, NULL); + err = unwind_next_common(state, on_accessible_stack, NULL); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 58f645ad66bc..50a1fa6b5c04 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -71,9 +71,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - struct stack_info info; - - return unwind_next_common(state, &info, on_accessible_stack, NULL); + return unwind_next_common(state, on_accessible_stack, NULL); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 949d19d603fb..b9cf551d9d31 100644 --- a/arch/arm64/kvm/stacktrace.c +++ 
b/arch/arm64/kvm/stacktrace.c @@ -97,9 +97,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - struct stack_info info; - - return unwind_next_common(state, &info, on_accessible_stack, + return unwind_next_common(state, on_accessible_stack, kvm_nvhe_stack_kern_va); } -- cgit v1.2.3 From b532ab5f23389e2141d8b586608f6435f83d5ecd Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:40 +0100 Subject: arm64: stacktrace: rename unwind_next_common() -> unwind_next_frame_record() The unwind_next_common() function unwinds a single frame record. There are other unwind steps (e.g. unwinding through trampolines) which are handled in the regular kernel unwinder, and in future there may be other common unwind helpers. Clarify the purpose of unwind_next_common() by renaming it to unwind_next_frame_record(). At the same time, add commentary, and delete the redundant comment at the top of asm/stacktrace/common.h. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 23 +++++++++++++---------- arch/arm64/kernel/stacktrace.c | 2 +- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 2 +- arch/arm64/kvm/stacktrace.c | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 0fbae7c78b70..a74fa301fe95 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -2,13 +2,6 @@ /* * Common arm64 stack unwinder code. * - * To implement a new arm64 stack unwinder: - * 1) Include this header - * - * 2) Call into unwind_next_common() from your top level unwind - * function, passing it the validation and translation callbacks - * (though the later can be NULL if no translation is required). - * * See: arch/arm64/kernel/stacktrace.c for the reference implementation. * * Copyright (C) 2012 ARM Ltd. @@ -145,9 +138,19 @@ typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info); -static inline int unwind_next_common(struct unwind_state *state, - on_accessible_stack_fn accessible, - stack_trace_translate_fp_fn translate_fp) +/** + * unwind_next_frame_record() - Unwind to the next frame record. + * + * @state: the current unwind state. + * @accessible: determines whether the frame record is accessible + * @translate_fp: translates the fp prior to access (may be NULL) + * + * Return: 0 upon success, an error code otherwise. 
+ */ +static inline int +unwind_next_frame_record(struct unwind_state *state, + on_accessible_stack_fn accessible, + stack_trace_translate_fp_fn translate_fp) { struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 056fb045d0e0..4c8865e495fe 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -109,7 +109,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_common(state, on_accessible_stack, NULL); + err = unwind_next_frame_record(state, on_accessible_stack, NULL); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 50a1fa6b5c04..579b46aa9a55 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -71,7 +71,7 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - return unwind_next_common(state, on_accessible_stack, NULL); + return unwind_next_frame_record(state, on_accessible_stack, NULL); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index b9cf551d9d31..b69c18a26567 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -97,8 +97,8 @@ static bool on_accessible_stack(const struct task_struct *tsk, static int unwind_next(struct unwind_state *state) { - return unwind_next_common(state, on_accessible_stack, - kvm_nvhe_stack_kern_va); + return unwind_next_frame_record(state, on_accessible_stack, + kvm_nvhe_stack_kern_va); } static void unwind(struct unwind_state *state, -- cgit v1.2.3 From d1f684e46bbd43eac5c6fb00906c57425d7022a6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:43 +0100 Subject: arm64: stacktrace: rework stack boundary discovery In subsequent patches we'll want to acquire the stack boundaries ahead-of-time, and we'll need to be able to acquire the relevant stack_info regardless of whether we have an object that happens to be on the stack. This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX() helpers, with the caller being responsible for checking whether an object is on a relevant stack. For the moment this is moved into the on_accessible_stack() functions, making these slightly larger; subsequent patches will remove the on_accessible_stack() functions and simplify the logic. The on_irq_stack() and on_task_stack() helpers are kept as these are used by IRQ entry sequences and stackleak respectively. As they're only used as predicates, the stack_info pointer parameter is removed in both cases. As the on_accessible_stack() functions are always passed a non-NULL info pointer, these now update info unconditionally. When updating the type to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but as these will not be consumed this should have no adverse effect. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T.
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/include/asm/stacktrace.h | 78 ++++++++++++++++++------------ arch/arm64/include/asm/stacktrace/common.h | 28 ++++------- arch/arm64/kernel/ptrace.c | 2 +- arch/arm64/kernel/stacktrace.c | 65 ++++++++++++++++--------- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 37 ++++++++++---- arch/arm64/kvm/stacktrace.c | 37 ++++++++++---- 7 files changed, 153 insertions(+), 96 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 86eb0bfe3b38..61883518fc50 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task); * The top of the current task's task stack */ #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE) -#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) +#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1)) #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index fa2df1ea22eb..aad0c6258721 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -22,77 +22,91 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); -static inline bool on_irq_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_irq(void) { unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long high = low + IRQ_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_IRQ, + }; } -static inline bool on_task_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) +static inline bool on_irq_stack(unsigned long sp, unsigned long size) +{ + struct stack_info info = stackinfo_get_irq(); + return stackinfo_on_stack(&info, sp, size); +} + +static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_TASK, + }; +} + +static inline bool on_task_stack(const struct task_struct *tsk, + unsigned long sp, unsigned long size) +{ + struct stack_info info = stackinfo_get_task(tsk); + return stackinfo_on_stack(&info, sp, size); } #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_overflow(void) { unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } #else -static inline bool on_overflow_stack(unsigned long sp, unsigned long size, - struct 
stack_info *info) -{ - return false; -} +#define stackinfo_get_overflow() stackinfo_get_unknown() #endif #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK) DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); -static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_sdei_normal(void) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_SDEI_NORMAL, + }; } -static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static inline struct stack_info stackinfo_get_sdei_critical(void) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_SDEI_CRITICAL, + }; } #else -static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - return false; -} - -static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, - struct stack_info *info) -{ - return false; -} +#define stackinfo_get_sdei_normal() stackinfo_get_unknown() +#define stackinfo_get_sdei_critical() stackinfo_get_unknown() #endif #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 81c21378b1ac..3c3bda34bb87 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -65,6 +65,15 @@ struct unwind_state { struct task_struct *task; }; +static inline struct stack_info stackinfo_get_unknown(void) +{ + return (struct stack_info) { + .low = 0, + .high = 0, + .type = STACK_TYPE_UNKNOWN, + }; +} + static inline bool stackinfo_on_stack(const struct stack_info *info, unsigned long sp, unsigned long size) { @@ -77,25 +86,6 @@ static inline bool stackinfo_on_stack(const struct stack_info *info, return true; } -static inline bool on_stack(unsigned long sp, unsigned long size, - unsigned long low, unsigned long high, - enum stack_type type, struct stack_info *info) -{ - struct stack_info tmp = { - .low = low, - .high = high, - .type = type, - }; - - if (!stackinfo_on_stack(&tmp, sp, size)) - return false; - - if (info) - *info = tmp; - - return true; -} - static inline void unwind_init_common(struct unwind_state *state, struct task_struct *task) { diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index eb7c08dfb834..00c05da7112c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || - on_irq_stack(addr, sizeof(unsigned long), NULL); + on_irq_stack(addr, sizeof(unsigned long)); } /** diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index edf9edca2055..ca56fd732c2a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -67,36 +67,55 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = 
thread_saved_pc(task); } -/* - * We can only safely access per-cpu stacks from current in a non-preemptible - * context. - */ static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - if (on_task_stack(tsk, sp, size, info)) - return true; - if (tsk != current || preemptible()) - return false; - if (on_irq_stack(sp, size, info)) - return true; - if (on_overflow_stack(sp, size, info)) - return true; - - if (IS_ENABLED(CONFIG_VMAP_STACK) && - IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) && - in_nmi()) { - if (on_sdei_critical_stack(sp, size, info)) - return true; - if (on_sdei_normal_stack(sp, size, info)) - return true; - } + tmp = stackinfo_get_task(tsk); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + /* + * We can only safely access per-cpu stacks when unwinding the current + * task in a non-preemptible context. + */ + if (tsk != current || preemptible()) + goto not_found; + + tmp = stackinfo_get_irq(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + /* + * We can only safely access SDEI stacks when unwinding the current + * task in an NMI context. + */ + if (!IS_ENABLED(CONFIG_VMAP_STACK) || + !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) || + !in_nmi()) + goto not_found; + + tmp = stackinfo_get_sdei_normal(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_sdei_critical(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + +not_found: + *info = stackinfo_get_unknown(); return false; + +found: + *info = tmp; + return true; } /* diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 579b46aa9a55..5da0d44f61b7 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -39,34 +39,51 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc) DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace); -static bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_overflow(void) { unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } -static bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); unsigned long high = params->stack_hyp_va; unsigned long low = high - PAGE_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; } static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - return (on_overflow_stack(sp, size, info) || - on_hyp_stack(sp, size, info)); + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_hyp(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + *info = stackinfo_get_unknown(); + return false; + +found: + *info = tmp; + return
true; } static int unwind_next(struct unwind_state *state) diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index b69c18a26567..26927344a263 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -62,37 +62,54 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, return true; } -static bool on_overflow_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_overflow(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; } -static bool on_hyp_stack(unsigned long sp, unsigned long size, - struct stack_info *info) +static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); unsigned long low = (unsigned long)stacktrace_info->stack_base; unsigned long high = low + PAGE_SIZE; - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; } static bool on_accessible_stack(const struct task_struct *tsk, unsigned long sp, unsigned long size, struct stack_info *info) { - if (info) - info->type = STACK_TYPE_UNKNOWN; + struct stack_info tmp; - return (on_overflow_stack(sp, size, info) || - on_hyp_stack(sp, size, info)); + tmp = stackinfo_get_overflow(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + tmp = stackinfo_get_hyp(); + if (stackinfo_on_stack(&tmp, sp, size)) + goto found; + + *info = stackinfo_get_unknown(); + return false; + +found: + *info = tmp; + return true; } static int unwind_next(struct unwind_state *state) -- cgit v1.2.3 From bd8abd68836b5c2b668afc4fb46d85d687779dec Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:44 +0100 Subject: arm64: stacktrace: remove stack type from fp translator In subsequent patches we'll remove the stack_type enum, and move the FP translation logic out of the raw FP unwind code. In preparation for doing so, this patch removes the type parameter from the FP translation callback, and modifies kvm_nvhe_stack_kern_va() to determine the relevant stack directly. So that kvm_nvhe_stack_kern_va() can use the stackinfo_*() helpers, these are moved earlier in the file, but are not modified in any way. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-8-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 6 +-- arch/arm64/kvm/stacktrace.c | 82 +++++++++++++++++------------- 2 files changed, 49 insertions(+), 39 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 3c3bda34bb87..2f972441f572 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -113,14 +113,12 @@ static inline void unwind_init_common(struct unwind_state *state, * pointer to a kernel address. 
* * @fp: the frame pointer to be updated to its kernel address. - * @type: the stack type associated with frame pointer @fp * * Return: true if the VA can be translated, false otherwise. * * Upon success @fp is updated to the corresponding kernel virtual address. */ -typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp, - enum stack_type type); +typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); /** * typedef on_accessible_stack_fn() - Check whether a stack range is on any of @@ -172,7 +170,7 @@ unwind_next_frame_record(struct unwind_state *state, * If fp is not from the current address space perform the necessary * translation before dereferencing it to get the next fp. */ - if (translate_fp && !translate_fp(&kern_fp, info.type)) + if (translate_fp && !translate_fp(&kern_fp)) return -EINVAL; /* diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 26927344a263..7658c5db47c1 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -21,6 +21,34 @@ #include +static struct stack_info stackinfo_get_overflow(void) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_OVERFLOW, + }; +} + +static struct stack_info stackinfo_get_hyp(void) +{ + struct kvm_nvhe_stacktrace_info *stacktrace_info + = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); + unsigned long low = (unsigned long)stacktrace_info->stack_base; + unsigned long high = low + PAGE_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + .type = STACK_TYPE_HYP, + }; +} + /* * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs * @@ -34,27 +62,31 @@ * Returns true on success and updates @addr to its corresponding kernel VA; * otherwise returns false. 
*/ -static bool kvm_nvhe_stack_kern_va(unsigned long *addr, - enum stack_type type) +static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size) { struct kvm_nvhe_stacktrace_info *stacktrace_info; unsigned long hyp_base, kern_base, hyp_offset; + struct stack_info stack; stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - switch (type) { - case STACK_TYPE_HYP: + stack = stackinfo_get_hyp(); + if (stackinfo_on_stack(&stack, *addr, size)) { kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); hyp_base = (unsigned long)stacktrace_info->stack_base; - break; - case STACK_TYPE_OVERFLOW: + goto found; + } + + stack = stackinfo_get_overflow(); + if (stackinfo_on_stack(&stack, *addr, size)) { kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); hyp_base = (unsigned long)stacktrace_info->overflow_stack_base; - break; - default: - return false; + goto found; } + return false; + +found: hyp_offset = *addr - hyp_base; *addr = kern_base + hyp_offset; @@ -62,32 +94,12 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, return true; } -static struct stack_info stackinfo_get_overflow(void) -{ - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; - unsigned long high = low + OVERFLOW_STACK_SIZE; - - return (struct stack_info) { - .low = low, - .high = high, - .type = STACK_TYPE_OVERFLOW, - }; -} - -static struct stack_info stackinfo_get_hyp(void) +/* + * Convert a KVM nVHE HYP frame record address to a kernel VA + */ +static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) { - struct kvm_nvhe_stacktrace_info *stacktrace_info - = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - unsigned long low = (unsigned long)stacktrace_info->stack_base; - unsigned long high = low + PAGE_SIZE; - - return (struct stack_info) { - .low = low, - .high = high, - .type = STACK_TYPE_HYP, - }; + return kvm_nvhe_stack_kern_va(addr, 16); } static bool on_accessible_stack(const struct task_struct *tsk, @@ -115,7 +127,7 @@ found: static int unwind_next(struct unwind_state *state) { return unwind_next_frame_record(state, on_accessible_stack, - kvm_nvhe_stack_kern_va); + kvm_nvhe_stack_kern_record_va); } static void unwind(struct unwind_state *state, -- cgit v1.2.3 From 8df137300d1964c3810991aa2fe17a105348b647 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:45 +0100 Subject: arm64: stacktrace: track all stack boundaries explicitly Currently we call an on_accessible_stack() callback for each step of the unwinder, requiring redundant work to be performed in the core of the unwind loop (e.g. disabling preemption around accesses to per-cpu variables containing stack boundaries). To prevent unwind loops which go through a stack multiple times, we have to track the set of unwound stacks, requiring a stack_type enum which needs to cater for all the stacks of all possible callees. To prevent loops within a stack, we must track the prior FP values. This patch reworks the unwinder to minimize the work in the core of the unwinder, and to remove the need for the stack_type enum. The set of accessible stacks (and their boundaries) is determined at the start of the unwind, and the current stack is tracked during the unwind, with completed stacks removed from the set of accessible stacks. This makes the boundary checks more accurate (e.g.
detecting overlapped frame records), and removes the need for separate tracking of the prior FP and visited stacks. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T. Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace.h | 5 - arch/arm64/include/asm/stacktrace/common.h | 164 +++++++++++++---------------- arch/arm64/kernel/stacktrace.c | 91 +++++++--------- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 35 ++---- arch/arm64/kvm/stacktrace.c | 36 ++----- 5 files changed, 132 insertions(+), 199 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index aad0c6258721..5a0edb064ea4 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -30,7 +30,6 @@ static inline struct stack_info stackinfo_get_irq(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_IRQ, }; } @@ -48,7 +47,6 @@ static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_TASK, }; } @@ -70,7 +68,6 @@ static inline struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } #else @@ -89,7 +86,6 @@ static inline struct stack_info stackinfo_get_sdei_normal(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_SDEI_NORMAL, }; } @@ -101,7 +97,6 @@ static inline struct stack_info stackinfo_get_sdei_critical(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_SDEI_CRITICAL, }; } #else diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 2f972441f572..638008f48597 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -9,26 +9,12 @@ #ifndef __ASM_STACKTRACE_COMMON_H #define __ASM_STACKTRACE_COMMON_H -#include -#include #include #include -enum stack_type { - STACK_TYPE_UNKNOWN, - STACK_TYPE_TASK, - STACK_TYPE_IRQ, - STACK_TYPE_OVERFLOW, - STACK_TYPE_SDEI_NORMAL, - STACK_TYPE_SDEI_CRITICAL, - STACK_TYPE_HYP, - __NR_STACK_TYPES -}; - struct stack_info { unsigned long low; unsigned long high; - enum stack_type type; }; /** @@ -37,32 +23,27 @@ struct stack_info { * @fp: The fp value in the frame record (or the real fp) * @pc: The lr value in the frame record (or the real lr) * - * @stacks_done: Stacks which have been entirely unwound, for which it is no - * longer valid to unwind to. - * - * @prev_fp: The fp that pointed to this frame record, or a synthetic value - * of 0. This is used to ensure that within a stack, each - * subsequent frame record is at an increasing address. - * @prev_type: The type of stack this frame record was on, or a synthetic - * value of STACK_TYPE_UNKNOWN. This is used to detect a - * transition from one stack to another. - * * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance * associated with the most recently encountered replacement lr * value. * * @task: The task being unwound. + * + * @stack: The stack currently being unwound. + * @stacks: An array of stacks which can be unwound. + * @nr_stacks: The number of stacks in @stacks. 
*/ struct unwind_state { unsigned long fp; unsigned long pc; - DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); - unsigned long prev_fp; - enum stack_type prev_type; #ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif struct task_struct *task; + + struct stack_info stack; + struct stack_info *stacks; + int nr_stacks; }; static inline struct stack_info stackinfo_get_unknown(void) @@ -70,7 +51,6 @@ static inline struct stack_info stackinfo_get_unknown(void) return (struct stack_info) { .low = 0, .high = 0, - .type = STACK_TYPE_UNKNOWN, }; } @@ -94,18 +74,7 @@ static inline void unwind_init_common(struct unwind_state *state, state->kr_cur = NULL; #endif - /* - * Prime the first unwind. - * - * In unwind_next() we'll check that the FP points to a valid stack, - * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be - * treated as a transition to whichever stack that happens to be. The - * prev_fp value won't be used, but we set it to 0 such that it is - * definitely not an accessible stack address. - */ - bitmap_zero(state->stacks_done, __NR_STACK_TYPES); - state->prev_fp = 0; - state->prev_type = STACK_TYPE_UNKNOWN; + state->stack = stackinfo_get_unknown(); } /** @@ -120,51 +89,94 @@ static inline void unwind_init_common(struct unwind_state *state, */ typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); +static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, + unsigned long sp, + unsigned long size) +{ + for (int i = 0; i < state->nr_stacks; i++) { + struct stack_info *info = &state->stacks[i]; + + if (stackinfo_on_stack(info, sp, size)) + return info; + } + + return NULL; +} + /** - * typedef on_accessible_stack_fn() - Check whether a stack range is on any of - * the possible stacks. - * - * @tsk: task whose stack is being unwound - * @sp: stack address being checked - * @size: size of the stack range being checked - * @info: stack unwinding context - * - * Return: true if the stack range is accessible, false otherwise. + * unwind_consume_stack() - Check if an object is on an accessible stack, + * updating stack boundaries so that future unwind steps cannot consume this + * object again. * - * Upon success @info is updated with information for the relevant stack. + * @state: the current unwind state. + * @sp: the base address of the object. + * @size: the size of the object. * - * Upon failure @info is updated with the UNKNOWN stack. + * Return: 0 upon success, an error code otherwise. */ -typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info); +static inline int unwind_consume_stack(struct unwind_state *state, + unsigned long sp, + unsigned long size) +{ + struct stack_info *next; + + if (stackinfo_on_stack(&state->stack, sp, size)) + goto found; + + next = unwind_find_next_stack(state, sp, size); + if (!next) + return -EINVAL; + + /* + * Stack transitions are strictly one-way, and once we've + * transitioned from one stack to another, it's never valid to + * unwind back to the old stack. + * + * Remove the current stack from the list of stacks so that it cannot + * be found on a subsequent transition. + * + * Note that stacks can nest in several valid orders, e.g. + * + * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL + * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW + * HYP -> OVERFLOW + * + * ... so we do not check the specific order of stack + * transitions. 
+ */ + state->stack = *next; + *next = stackinfo_get_unknown(); + +found: + /* + * Future unwind steps can only consume stack above this frame record. + * Update the current stack to start immediately above it. + */ + state->stack.low = sp + size; + return 0; +} /** * unwind_next_frame_record() - Unwind to the next frame record. * * @state: the current unwind state. - * @accessible: determines whether the frame record is accessible * @translate_fp: translates the fp prior to access (may be NULL) * * Return: 0 upon success, an error code otherwise. */ static inline int unwind_next_frame_record(struct unwind_state *state, - on_accessible_stack_fn accessible, stack_trace_translate_fp_fn translate_fp) { - struct stack_info info; unsigned long fp = state->fp, kern_fp = fp; - struct task_struct *tsk = state->task; + int err; if (fp & 0x7) return -EINVAL; - if (!accessible(tsk, fp, 16, &info)) - return -EINVAL; - - if (test_bit(info.type, state->stacks_done)) - return -EINVAL; + err = unwind_consume_stack(state, fp, 16); + if (err) + return err; /* * If fp is not from the current address space perform the necessary @@ -174,34 +186,10 @@ unwind_next_frame_record(struct unwind_state *state, return -EINVAL; /* - * As stacks grow downward, any valid record on the same stack must be - * at a strictly higher address than the prior record. - * - * Stacks can nest in several valid orders, e.g. - * - * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL - * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW - * HYP -> OVERFLOW - * - * ... but the nesting itself is strict. Once we transition from one - * stack to another, it's never valid to unwind back to that first - * stack. - */ - if (info.type == state->prev_type) { - if (fp <= state->prev_fp) - return -EINVAL; - } else { - __set_bit(state->prev_type, state->stacks_done); - } - - /* - * Record this frame record's values and location. The prev_fp and - * prev_type are only meaningful to the next unwind_next() invocation. + * Record this frame record's values. */ state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); - state->prev_fp = fp; - state->prev_type = info.type; return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index ca56fd732c2a..9c8820f24262 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -67,57 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state, state->pc = thread_saved_pc(task); } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_task(tsk); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - /* - * We can only safely access per-cpu stacks when unwinding the current - * task in a non-preemptible context. - */ - if (tsk != current || preemptible()) - goto not_found; - - tmp = stackinfo_get_irq(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - /* - * We can only safely access SDEI stacks which unwinding the current - * task in an NMI context. 
- */ - if (!IS_ENABLED(CONFIG_VMAP_STACK) || - !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) || - !in_nmi()) - goto not_found; - - tmp = stackinfo_get_sdei_normal(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_sdei_critical(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - -not_found: - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - /* * Unwind from one frame record (A) to the next frame record (B). * @@ -135,7 +84,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_frame_record(state, on_accessible_stack, NULL); + err = unwind_next_frame_record(state, NULL); if (err) return err; @@ -215,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) barrier(); } +/* + * Per-cpu stacks are only accessible when unwinding the current task in a + * non-preemptible context. + */ +#define STACKINFO_CPU(name) \ + ({ \ + ((task == current) && !preemptible()) \ + ? stackinfo_get_##name() \ + : stackinfo_get_unknown(); \ + }) + +/* + * SDEI stacks are only accessible when unwinding the current task in an NMI + * context. + */ +#define STACKINFO_SDEI(name) \ + ({ \ + ((task == current) && in_nmi()) \ + ? stackinfo_get_sdei_##name() \ + : stackinfo_get_unknown(); \ + }) + noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_task(task), + STACKINFO_CPU(irq), +#if defined(CONFIG_VMAP_STACK) + STACKINFO_CPU(overflow), +#endif +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE) + STACKINFO_SDEI(normal), + STACKINFO_SDEI(critical), +#endif + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; if (regs) { if (task != current) diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 5da0d44f61b7..08e1325ead73 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -47,7 +47,6 @@ static struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } @@ -60,35 +59,12 @@ static struct stack_info stackinfo_get_hyp(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_HYP, }; } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_hyp(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, on_accessible_stack, NULL); + return unwind_next_frame_record(state, NULL); } static void notrace unwind(struct unwind_state *state, @@ -144,7 +120,14 @@ static bool pkvm_save_backtrace_entry(void *arg, unsigned long where) */ static void pkvm_save_backtrace(unsigned long fp, unsigned long pc) { - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_overflow(), + stackinfo_get_hyp(), + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; int 
idx = 0; kvm_nvhe_unwind_init(&state, fp, pc); diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 7658c5db47c1..0b4703945780 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -31,7 +31,6 @@ static struct stack_info stackinfo_get_overflow(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_OVERFLOW, }; } @@ -45,7 +44,6 @@ static struct stack_info stackinfo_get_hyp(void) return (struct stack_info) { .low = low, .high = high, - .type = STACK_TYPE_HYP, }; } @@ -102,32 +100,9 @@ static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) return kvm_nvhe_stack_kern_va(addr, 16); } -static bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, unsigned long size, - struct stack_info *info) -{ - struct stack_info tmp; - - tmp = stackinfo_get_overflow(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - tmp = stackinfo_get_hyp(); - if (stackinfo_on_stack(&tmp, sp, size)) - goto found; - - *info = stackinfo_get_unknown(); - return false; - -found: - *info = tmp; - return true; -} - static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, on_accessible_stack, - kvm_nvhe_stack_kern_record_va); + return unwind_next_frame_record(state, kvm_nvhe_stack_kern_record_va); } static void unwind(struct unwind_state *state, @@ -185,7 +160,7 @@ static void kvm_nvhe_dump_backtrace_end(void) static void hyp_dump_backtrace(unsigned long hyp_offset) { struct kvm_nvhe_stacktrace_info *stacktrace_info; - struct unwind_state state; + struct stack_info stacks[] = { + stackinfo_get_overflow(), + stackinfo_get_hyp(), + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); -- cgit v1.2.3 From 4b5e694e25ca2cbb5c97694a7d2a473f35099829 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 1 Sep 2022 14:06:46 +0100 Subject: arm64: stacktrace: track hyp stacks in unwinder's address space Currently unwind_next_frame_record() has an optional callback to convert the address space of the FP. This is necessary for the NVHE unwinder, which tracks the stacks in the hyp VA space, but accesses the frame records in the kernel VA space. This is a bit unfortunate since it clutters unwind_next_frame_record(), which will get in the way of future rework. Instead, this patch changes the NVHE unwinder to track the stacks in the kernel's VA space and translate the FP prior to calling unwind_next_frame_record(). This removes the need for the translate_fp() callback, as all unwinders consistently track stacks in the native address space of the unwinder. At the same time, this patch consolidates the generation of the stack addresses behind the stackinfo_get_*() helpers. Signed-off-by: Mark Rutland Reviewed-by: Kalesh Singh Reviewed-by: Madhavan T.
Venkataraman Reviewed-by: Mark Brown Cc: Fuad Tabba Cc: Marc Zyngier Cc: Will Deacon Link: https://lore.kernel.org/r/20220901130646.1316937-10-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/stacktrace/common.h | 29 ++------------ arch/arm64/kernel/stacktrace.c | 2 +- arch/arm64/kvm/hyp/nvhe/stacktrace.c | 2 +- arch/arm64/kvm/stacktrace.c | 62 +++++++++++++++++++----------- 4 files changed, 46 insertions(+), 49 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 638008f48597..508f734de46e 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -77,18 +77,6 @@ static inline void unwind_init_common(struct unwind_state *state, state->stack = stackinfo_get_unknown(); } -/** - * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame - * pointer to a kernel address. - * - * @fp: the frame pointer to be updated to its kernel address. - * - * Return: true if the VA can be translated, false otherwise. - * - * Upon success @fp is updated to the corresponding kernel virtual address. - */ -typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp); - static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, unsigned long sp, unsigned long size) @@ -160,15 +148,13 @@ found: * unwind_next_frame_record() - Unwind to the next frame record. * * @state: the current unwind state. - * @translate_fp: translates the fp prior to access (may be NULL) * * Return: 0 upon success, an error code otherwise. */ static inline int -unwind_next_frame_record(struct unwind_state *state, - stack_trace_translate_fp_fn translate_fp) +unwind_next_frame_record(struct unwind_state *state) { - unsigned long fp = state->fp, kern_fp = fp; + unsigned long fp = state->fp; int err; if (fp & 0x7) @@ -178,18 +164,11 @@ unwind_next_frame_record(struct unwind_state *state, if (err) return err; - /* - * If fp is not from the current address space perform the necessary - * translation before dereferencing it to get the next fp. - */ - if (translate_fp && !translate_fp(&kern_fp)) - return -EINVAL; - /* * Record this frame record's values. 
*/ - state->fp = READ_ONCE(*(unsigned long *)(kern_fp)); - state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8)); + state->fp = READ_ONCE(*(unsigned long *)(fp)); + state->pc = READ_ONCE(*(unsigned long *)(fp + 8)); return 0; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 9c8820f24262..634279b3b03d 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -84,7 +84,7 @@ static int notrace unwind_next(struct unwind_state *state) if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) return -ENOENT; - err = unwind_next_frame_record(state, NULL); + err = unwind_next_frame_record(state); if (err) return err; diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c index 08e1325ead73..ed6b58b19cfa 100644 --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c @@ -64,7 +64,7 @@ static struct stack_info stackinfo_get_hyp(void) static int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, NULL); + return unwind_next_frame_record(state); } static void notrace unwind(struct unwind_state *state, diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c index 0b4703945780..3ace5b75813b 100644 --- a/arch/arm64/kvm/stacktrace.c +++ b/arch/arm64/kvm/stacktrace.c @@ -34,6 +34,17 @@ static struct stack_info stackinfo_get_overflow(void) }; } +static struct stack_info stackinfo_get_overflow_kern_va(void) +{ + unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); + unsigned long high = low + OVERFLOW_STACK_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} + static struct stack_info stackinfo_get_hyp(void) { struct kvm_nvhe_stacktrace_info *stacktrace_info @@ -47,6 +58,17 @@ static struct stack_info stackinfo_get_hyp(void) }; } +static struct stack_info stackinfo_get_hyp_kern_va(void) +{ + unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); + unsigned long high = low + PAGE_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} + /* * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs * @@ -62,33 +84,22 @@ static struct stack_info stackinfo_get_hyp(void) */ static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size) { - struct kvm_nvhe_stacktrace_info *stacktrace_info; - unsigned long hyp_base, kern_base, hyp_offset; - struct stack_info stack; + struct stack_info stack_hyp, stack_kern; - stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); - - stack = stackinfo_get_hyp(); - if (stackinfo_on_stack(&stack, *addr, size)) { - kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page); - hyp_base = (unsigned long)stacktrace_info->stack_base; + stack_hyp = stackinfo_get_hyp(); + stack_kern = stackinfo_get_hyp_kern_va(); + if (stackinfo_on_stack(&stack_hyp, *addr, size)) goto found; - } - stack = stackinfo_get_overflow(); - if (stackinfo_on_stack(&stack, *addr, size)) { - kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack); - hyp_base = (unsigned long)stacktrace_info->overflow_stack_base; + stack_hyp = stackinfo_get_overflow(); + stack_kern = stackinfo_get_overflow_kern_va(); + if (stackinfo_on_stack(&stack_hyp, *addr, size)) goto found; - } return false; found: - hyp_offset = *addr - hyp_base; - - *addr = kern_base + hyp_offset; - + *addr = *addr - stack_hyp.low + stack_kern.low; return true; } @@ -102,7 +113,14 @@ static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr) static 
int unwind_next(struct unwind_state *state) { - return unwind_next_frame_record(state, kvm_nvhe_stack_kern_record_va); + /* + * The FP is in the hypervisor VA space. Convert it to the kernel VA + * space so it can be unwound by the regular unwind functions. + */ + if (!kvm_nvhe_stack_kern_record_va(&state->fp)) + return -EINVAL; + + return unwind_next_frame_record(state); } static void unwind(struct unwind_state *state, @@ -161,8 +179,8 @@ static void hyp_dump_backtrace(unsigned long hyp_offset) { struct kvm_nvhe_stacktrace_info *stacktrace_info; struct stack_info stacks[] = { - stackinfo_get_overflow(), - stackinfo_get_hyp(), + stackinfo_get_overflow_kern_va(), + stackinfo_get_hyp_kern_va(), }; struct unwind_state state = { .stacks = stacks, -- cgit v1.2.3 From c0357a73fa4a96d8ed9ee46e9927d9fcbc9d0828 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:49 +0100 Subject: arm64/sysreg: Align field names in ID_AA64DFR0_EL1 with architecture The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1 does not align well with kernel conventions, using as it does a lot of MixedCase in various arrangements. In preparation for automatically generating the defines for this register rename the defines used to match what is in the architecture. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-2-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/el2_setup.h | 8 +++---- arch/arm64/include/asm/hw_breakpoint.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 40 +++++++++++++++++----------------- arch/arm64/kernel/cpufeature.c | 14 ++++++------ arch/arm64/kernel/debug-monitors.c | 2 +- arch/arm64/kernel/perf_event.c | 8 +++---- arch/arm64/kvm/debug.c | 4 ++-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 12 +++++----- arch/arm64/kvm/pmu-emul.c | 16 +++++++------- arch/arm64/kvm/sys_regs.c | 16 +++++++------- 12 files changed, 64 insertions(+), 64 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c1fc5f7bb978..71222f2b8a5a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -512,7 +512,7 @@ alternative_endif */ .macro reset_pmuserenr_el0, tmpreg mrs \tmpreg, id_aa64dfr0_el1 - sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4 + sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4 cmp \tmpreg, #1 // Skip if no PMU present b.lt 9000f msr pmuserenr_el0, xzr // Disable PMU access from EL0 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 79bb9e58d9c6..e3b63967c8a9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) u64 mask = GENMASK_ULL(field + 3, field); /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ - if (val == ID_AA64DFR0_PMUVER_IMP_DEF) + if (val == ID_AA64DFR0_PMUVer_IMP_DEF) val = 0; if (val > cap) { diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b6e9bea7c9ec..03af4278bc23 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -40,7 +40,7 @@ .macro __init_el2_debug mrs x1, id_aa64dfr0_el1 - sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 + sbfx x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4 cmp x0, #1 b.lt .Lskip_pmu_\@ // Skip if no PMU present mrs x0, pmcr_el0 // Disable 
debug access traps @@ -49,7 +49,7 @@ csel x2, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ - ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, @@ -65,7 +65,7 @@ .Lskip_spe_\@: /* Trace buffer */ - ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present mrs_s x0, SYS_TRBIDR_EL1 @@ -137,7 +137,7 @@ mov x0, xzr mrs x1, id_aa64dfr0_el1 - ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 + ubfx x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 cmp x1, #3 b.lt .Lset_debug_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index bc7aaed4b34e..d667c03d5f5e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -142,7 +142,7 @@ static inline int get_num_brps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_BRPS_SHIFT); + ID_AA64DFR0_BRPs_SHIFT); } /* Determine number of WRP registers available. */ @@ -151,7 +151,7 @@ static inline int get_num_wrps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_WRPS_SHIFT); + ID_AA64DFR0_WRPs_SHIFT); } #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c7876363c6e5..b1e9e4d3d964 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -700,26 +700,26 @@ /* id_aa64dfr0 */ #define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TRBE_SHIFT 44 -#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 -#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 -#define ID_AA64DFR0_PMSVER_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 -#define ID_AA64DFR0_WRPS_SHIFT 20 -#define ID_AA64DFR0_BRPS_SHIFT 12 -#define ID_AA64DFR0_PMUVER_SHIFT 8 -#define ID_AA64DFR0_TRACEVER_SHIFT 4 -#define ID_AA64DFR0_DEBUGVER_SHIFT 0 - -#define ID_AA64DFR0_PMUVER_8_0 0x1 -#define ID_AA64DFR0_PMUVER_8_1 0x4 -#define ID_AA64DFR0_PMUVER_8_4 0x5 -#define ID_AA64DFR0_PMUVER_8_5 0x6 -#define ID_AA64DFR0_PMUVER_8_7 0x7 -#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVER_8_2 0x1 -#define ID_AA64DFR0_PMSVER_8_3 0x2 +#define ID_AA64DFR0_TraceBuffer_SHIFT 44 +#define ID_AA64DFR0_TraceFilt_SHIFT 40 +#define ID_AA64DFR0_DoubleLock_SHIFT 36 +#define ID_AA64DFR0_PMSVer_SHIFT 32 +#define ID_AA64DFR0_CTX_CMPs_SHIFT 28 +#define ID_AA64DFR0_WRPs_SHIFT 20 +#define ID_AA64DFR0_BRPs_SHIFT 12 +#define ID_AA64DFR0_PMUVer_SHIFT 8 +#define ID_AA64DFR0_TraceVer_SHIFT 4 +#define ID_AA64DFR0_DebugVer_SHIFT 0 + +#define ID_AA64DFR0_PMUVer_8_0 0x1 +#define ID_AA64DFR0_PMUVer_8_1 0x4 +#define ID_AA64DFR0_PMUVer_8_4 0x5 +#define ID_AA64DFR0_PMUVer_8_5 0x6 +#define ID_AA64DFR0_PMUVer_8_7 0x7 +#define ID_AA64DFR0_PMUVer_IMP_DEF 0xf + +#define ID_AA64DFR0_PMSVer_8_2 0x1 +#define ID_AA64DFR0_PMSVer_8_3 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c2e42feb3e1a..c694d6cbf82d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, 
FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0), /* * We can instantiate multiple PMU instances with different levels * of support. */ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index bf9fe71589bc..572c8ac2873c 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -28,7 +28,7 @@ u8 debug_monitors_arch(void) { return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1), - ID_AA64DFR0_DEBUGVER_SHIFT); + ID_AA64DFR0_DebugVer_SHIFT); } /* diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index cb69ff1e6138..83f7a8980623 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info) dfr0 = read_sysreg(id_aa64dfr0_el1); pmuver = cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_PMUVER_SHIFT); - if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0) + ID_AA64DFR0_PMUVer_SHIFT); + if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0) return; cpu_pmu->pmuver = pmuver; @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 0b28d7db7c76..14878cd55c53 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * If SPE is present on this CPU and is available at current EL, * we may need to check if the host state needs to be saved. 
*/ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) && !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) && !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); } diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index fc3e32709ba2..ba1327fac03f 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Trap/constrain PMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK; } /* Trap Debug */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids)) mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; /* Trap OS Double Lock */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids)) mdcr_set |= MDCR_EL2_TDOSA; /* Trap SPE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPMS; mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; } /* Trap Trace Filter */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids)) cptr_set |= CPTR_EL2_TTA; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 11c43bed5f97..331478c67dca 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_PMUVER_8_0: + case ID_AA64DFR0_PMUVer_8_0: return GENMASK(9, 0); - case ID_AA64DFR0_PMUVER_8_1: - case ID_AA64DFR0_PMUVER_8_4: - case ID_AA64DFR0_PMUVER_8_5: - case ID_AA64DFR0_PMUVER_8_7: + case ID_AA64DFR0_PMUVer_8_1: + case ID_AA64DFR0_PMUVer_8_4: + case ID_AA64DFR0_PMUVer_8_5: + case ID_AA64DFR0_PMUVer_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) + if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) return; mutex_lock(&arm_pmus_lock); @@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) if (event->pmu) { pmu = to_arm_pmu(event->pmu); if (pmu->pmuver == 0 || - pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) + pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) pmu = NULL; } @@ -856,7 +856,7 @@ u64 
kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index fa61793467a7..626d7769e23d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64DFR0_EL1: /* Limit debug to ARMv8.0 */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6); /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVER_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0); + ID_AA64DFR0_PMUVer_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0); /* Hide SPE from guests */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer); break; case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.4 */ @@ -1827,9 +1827,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); - p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) + p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } -- cgit v1.2.3 From fcf37b38ff2282ef3dc6ba1966c83b29e5734edd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:50 +0100 Subject: arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names Normally we include the full register name in the defines for fields within registers but this has not been followed for ID registers. In preparation for automatic generation of defines add the _EL1s into the defines for ID_AA64DFR0_EL1 to follow the convention. No functional changes. 
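For illustration, the rename is mechanical at every call site. A sketch, not part of the patch (the surrounding variables are hypothetical, but the macro and helper names are the ones used in the diff below):

	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	/* Before: FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), dfr0) */
	u8 pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);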
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-3-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 2 +- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/el2_setup.h | 8 +++---- arch/arm64/include/asm/hw_breakpoint.h | 4 ++-- arch/arm64/include/asm/sysreg.h | 42 +++++++++++++++++----------------- arch/arm64/kernel/cpufeature.c | 14 ++++++------ arch/arm64/kernel/debug-monitors.c | 2 +- arch/arm64/kernel/perf_event.c | 8 +++---- arch/arm64/kvm/debug.c | 4 ++-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 12 +++++----- arch/arm64/kvm/pmu-emul.c | 16 ++++++------- arch/arm64/kvm/sys_regs.c | 16 ++++++------- 12 files changed, 65 insertions(+), 65 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 71222f2b8a5a..cf8e72e733de 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -512,7 +512,7 @@ alternative_endif */ .macro reset_pmuserenr_el0, tmpreg mrs \tmpreg, id_aa64dfr0_el1 - sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4 + sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4 cmp \tmpreg, #1 // Skip if no PMU present b.lt 9000f msr pmuserenr_el0, xzr // Disable PMU access from EL0 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e3b63967c8a9..ff06e6fb5939 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) u64 mask = GENMASK_ULL(field + 3, field); /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ - if (val == ID_AA64DFR0_PMUVer_IMP_DEF) + if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) val = 0; if (val > cap) { diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 03af4278bc23..668569adf4d3 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -40,7 +40,7 @@ .macro __init_el2_debug mrs x1, id_aa64dfr0_el1 - sbfx x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4 + sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4 cmp x0, #1 b.lt .Lskip_pmu_\@ // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps @@ -49,7 +49,7 @@ csel x2, xzr, x0, lt // all PMU counters from EL1 /* Statistical profiling */ - ubfx x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, @@ -65,7 +65,7 @@ .Lskip_spe_\@: /* Trace buffer */ - ubfx x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4 + ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4 cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present mrs_s x0, SYS_TRBIDR_EL1 @@ -137,7 +137,7 @@ mov x0, xzr mrs x1, id_aa64dfr0_el1 - ubfx x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4 + ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cmp x1, #3 b.lt .Lset_debug_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index d667c03d5f5e..fa4c6ff3aa9b 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -142,7 +142,7 @@ static inline int get_num_brps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_BRPs_SHIFT); + ID_AA64DFR0_EL1_BRPs_SHIFT); } /* Determine number of WRP registers 
available. */ @@ -151,7 +151,7 @@ static inline int get_num_wrps(void) u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); return 1 + cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_WRPs_SHIFT); + ID_AA64DFR0_EL1_WRPs_SHIFT); } #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b1e9e4d3d964..a9544561397d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -699,27 +699,27 @@ #endif /* id_aa64dfr0 */ -#define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TraceBuffer_SHIFT 44 -#define ID_AA64DFR0_TraceFilt_SHIFT 40 -#define ID_AA64DFR0_DoubleLock_SHIFT 36 -#define ID_AA64DFR0_PMSVer_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPs_SHIFT 28 -#define ID_AA64DFR0_WRPs_SHIFT 20 -#define ID_AA64DFR0_BRPs_SHIFT 12 -#define ID_AA64DFR0_PMUVer_SHIFT 8 -#define ID_AA64DFR0_TraceVer_SHIFT 4 -#define ID_AA64DFR0_DebugVer_SHIFT 0 - -#define ID_AA64DFR0_PMUVer_8_0 0x1 -#define ID_AA64DFR0_PMUVer_8_1 0x4 -#define ID_AA64DFR0_PMUVer_8_4 0x5 -#define ID_AA64DFR0_PMUVer_8_5 0x6 -#define ID_AA64DFR0_PMUVer_8_7 0x7 -#define ID_AA64DFR0_PMUVer_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVer_8_2 0x1 -#define ID_AA64DFR0_PMSVer_8_3 0x2 +#define ID_AA64DFR0_EL1_MTPMU_SHIFT 48 +#define ID_AA64DFR0_EL1_TraceBuffer_SHIFT 44 +#define ID_AA64DFR0_EL1_TraceFilt_SHIFT 40 +#define ID_AA64DFR0_EL1_DoubleLock_SHIFT 36 +#define ID_AA64DFR0_EL1_PMSVer_SHIFT 32 +#define ID_AA64DFR0_EL1_CTX_CMPs_SHIFT 28 +#define ID_AA64DFR0_EL1_WRPs_SHIFT 20 +#define ID_AA64DFR0_EL1_BRPs_SHIFT 12 +#define ID_AA64DFR0_EL1_PMUVer_SHIFT 8 +#define ID_AA64DFR0_EL1_TraceVer_SHIFT 4 +#define ID_AA64DFR0_EL1_DebugVer_SHIFT 0 + +#define ID_AA64DFR0_EL1_PMUVer_8_0 0x1 +#define ID_AA64DFR0_EL1_PMUVer_8_1 0x4 +#define ID_AA64DFR0_EL1_PMUVer_8_4 0x5 +#define ID_AA64DFR0_EL1_PMUVer_8_5 0x6 +#define ID_AA64DFR0_EL1_PMUVer_8_7 0x7 +#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf + +#define ID_AA64DFR0_EL1_PMSVer_8_2 0x1 +#define ID_AA64DFR0_EL1_PMSVer_8_3 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c694d6cbf82d..c22732a6908b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0), /* * We can instantiate multiple PMU instances with different levels * of support. 
*/ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 572c8ac2873c..3da09778267e 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -28,7 +28,7 @@ u8 debug_monitors_arch(void) { return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1), - ID_AA64DFR0_DebugVer_SHIFT); + ID_AA64DFR0_EL1_DebugVer_SHIFT); } /* diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 83f7a8980623..8b878837d8f1 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info) dfr0 = read_sysreg(id_aa64dfr0_el1); pmuver = cpuid_feature_extract_unsigned_field(dfr0, - ID_AA64DFR0_PMUVer_SHIFT); - if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0) + ID_AA64DFR0_EL1_PMUVer_SHIFT); + if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0) return; cpu_pmu->pmuver = pmuver; @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 14878cd55c53..3f7563d768e2 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * If SPE is present on this CPU and is available at current EL, * we may need to check if the host state needs to be saved. 
*/ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) && + if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) && !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); } diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index ba1327fac03f..85d3b7ae720f 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) u64 cptr_set = 0; /* Trap/constrain PMU */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK; } /* Trap Debug */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids)) mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; /* Trap OS Double Lock */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids)) mdcr_set |= MDCR_EL2_TDOSA; /* Trap SPE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) { + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) { mdcr_set |= MDCR_EL2_TPMS; mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; } /* Trap Trace Filter */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids)) + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) cptr_set |= CPTR_EL2_TTA; vcpu->arch.mdcr_el2 |= mdcr_set; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 331478c67dca..7122b5387de6 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_PMUVer_8_0: + case ID_AA64DFR0_EL1_PMUVer_8_0: return GENMASK(9, 0); - case ID_AA64DFR0_PMUVer_8_1: - case ID_AA64DFR0_PMUVer_8_4: - case ID_AA64DFR0_PMUVer_8_5: - case ID_AA64DFR0_PMUVer_8_7: + case ID_AA64DFR0_EL1_PMUVer_8_1: + case ID_AA64DFR0_EL1_PMUVer_8_4: + case ID_AA64DFR0_EL1_PMUVer_8_5: + case ID_AA64DFR0_EL1_PMUVer_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) { struct arm_pmu_entry *entry; - if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) + if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) return; mutex_lock(&arm_pmus_lock); @@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) if (event->pmu) { pmu = to_arm_pmu(event->pmu); if (pmu->pmuver == 0 || - pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF) + pmu->pmuver == 
ID_AA64DFR0_EL1_PMUVer_IMP_DEF) pmu = NULL; } @@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 626d7769e23d..d4546a5b0ea5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, break; case SYS_ID_AA64DFR0_EL1: /* Limit debug to ARMv8.0 */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer); - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6); /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVer_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0); + ID_AA64DFR0_EL1_PMUVer_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0); /* Hide SPE from guests */ - val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer); + val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer); break; case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.4 */ @@ -1827,9 +1827,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); - p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20) + p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } -- cgit v1.2.3 From 121a8fc088f13c64d9f3c9b3e7faa4c246e0a32c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 10 Sep 2022 17:33:51 +0100 Subject: arm64/sysreg: Use feature numbering for PMU and SPE revisions Currently the kernel refers to the versions of the PMU and SPE features by the version of the architecture where those features were updated but the ARM refers to them using the FEAT_ names for the features. To improve consistency and help with updating for newer features, and since v9 will make our current naming scheme a bit more confusing, update the macros identifying features to use the FEAT_ based scheme.
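As a sketch of the new scheme (the helper is hypothetical; the macro names come from the diff below), a check against the ARMv8.4 PMU is now phrased in terms of the FEAT_PMUv3p4 feature revision:

	/* Was: cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 */
	static bool pmu_at_least_v3p4(const struct arm_pmu *cpu_pmu)
	{
		return cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
	}

The field encodings themselves are unchanged; only the macro suffixes move from architecture releases (_8_0, _8_1, ...) to feature revisions (_IMP, _V3P1, ...).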
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220910163354.860255-4-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 14 +++++++------- arch/arm64/kernel/perf_event.c | 4 ++-- arch/arm64/kvm/pmu-emul.c | 12 ++++++------ arch/arm64/kvm/sys_regs.c | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a9544561397d..aea3ec657c3f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -711,15 +711,15 @@ #define ID_AA64DFR0_EL1_TraceVer_SHIFT 4 #define ID_AA64DFR0_EL1_DebugVer_SHIFT 0 -#define ID_AA64DFR0_EL1_PMUVer_8_0 0x1 -#define ID_AA64DFR0_EL1_PMUVer_8_1 0x4 -#define ID_AA64DFR0_EL1_PMUVer_8_4 0x5 -#define ID_AA64DFR0_EL1_PMUVer_8_5 0x6 -#define ID_AA64DFR0_EL1_PMUVer_8_7 0x7 +#define ID_AA64DFR0_EL1_PMUVer_IMP 0x1 +#define ID_AA64DFR0_EL1_PMUVer_V3P1 0x4 +#define ID_AA64DFR0_EL1_PMUVer_V3P4 0x5 +#define ID_AA64DFR0_EL1_PMUVer_V3P5 0x6 +#define ID_AA64DFR0_EL1_PMUVer_V3P7 0x7 #define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf -#define ID_AA64DFR0_EL1_PMSVer_8_2 0x1 -#define ID_AA64DFR0_EL1_PMSVer_8_3 0x2 +#define ID_AA64DFR0_EL1_PMSVer_IMP 0x1 +#define ID_AA64DFR0_EL1_PMSVer_V1P1 0x2 #define ID_DFR0_PERFMON_SHIFT 24 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 8b878837d8f1..7b0643fe2f13 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) { - return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5); + return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5); } static inline bool armv8pmu_event_has_user_read(struct perf_event *event) @@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); /* store PMMIR_EL1 register for sysfs */ - if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31))) + if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31))) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); else cpu_pmu->reg_pmmir = 0; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 7122b5387de6..0003c7d37533 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) pmuver = kvm->arch.arm_pmu->pmuver; switch (pmuver) { - case ID_AA64DFR0_EL1_PMUVer_8_0: + case ID_AA64DFR0_EL1_PMUVer_IMP: return GENMASK(9, 0); - case ID_AA64DFR0_EL1_PMUVer_8_1: - case ID_AA64DFR0_EL1_PMUVer_8_4: - case ID_AA64DFR0_EL1_PMUVer_8_5: - case ID_AA64DFR0_EL1_PMUVer_8_7: + case ID_AA64DFR0_EL1_PMUVer_V3P1: + case ID_AA64DFR0_EL1_PMUVer_V3P4: + case ID_AA64DFR0_EL1_PMUVer_V3P5: + case ID_AA64DFR0_EL1_PMUVer_V3P7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); @@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d4546a5b0ea5..2ef1121ab844 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ 
-1115,7 +1115,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_EL1_PMUVer_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0); + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0); /* Hide SPE from guests */ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer); break; -- cgit v1.2.3 From 34bbfdfb146be3ad485cbe526b2a1e9ce220649c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:04 +0100 Subject: arm64: alternatives: kvm: prepare for cap changes The KVM patching callbacks use cpus_have_final_cap() internally within has_vhe(), and subsequent patches will make it invalid to call cpus_have_final_cap() before alternatives patching has completed, and will mean that cpus_have_const_cap() will always fall back to dynamic checks prior to alternatives patching. In preparation for said change, this patch modifies the KVM patching callbacks to use cpus_have_cap() directly. This is not subject to patching, and will dynamically check the cpu_hwcaps array, which is functionally equivalent to the existing behaviour. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kvm/va_layout.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index acdb7b3cc97d..91b22a014610 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -169,7 +169,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt, * dictates it and we don't have any spare bits in the * address), NOP everything after masking the kernel VA. */ - if (has_vhe() || (!tag_val && i > 0)) { + if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) { updptr[i] = cpu_to_le32(aarch64_insn_gen_nop()); continue; } @@ -193,7 +193,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt, BUG_ON(nr_inst != 4); - if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe())) + if (!cpus_have_cap(ARM64_SPECTRE_V3A) || + WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))) return; /* -- cgit v1.2.3 From 4c0bd995d73ed8897650095c7892b132a0bd66a4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 12 Sep 2022 17:22:08 +0100 Subject: arm64: alternatives: have callbacks take a cap Today, callback alternatives are special-cased within __apply_alternatives(), and are applied alongside patching for system capabilities as ARM64_NCAPS is not part of the boot_capabilities feature mask. This special-casing is less than ideal. Giving special meaning to ARM64_NCAPS for this requires some structures and loops to use ARM64_NCAPS + 1 (AKA ARM64_NPATCHABLE), while others use ARM64_NCAPS. It's also not immediately clear that callback alternatives are only applied when applying alternatives for system-wide features. To make this a bit clearer, this patch changes the way that callback alternatives are identified to remove the special-casing of ARM64_NCAPS, and to allow callback alternatives to be associated with a cpucap as with all other alternatives. New cpucaps, ARM64_ALWAYS_BOOT and ARM64_ALWAYS_SYSTEM, are added which are always detected alongside boot cpu capabilities and system capabilities respectively.
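As an editor's illustration (not part of the patch), the resulting encoding can be modelled in standalone C: an alternative's cpufeature field now carries a real cpucap in its low bits and a "this is a callback" flag in bit 15. The in-kernel ALT_CAP()/ALT_HAS_CB() helpers in the alternative.c hunk below take a struct alt_instr *; this sketch works on the raw field, and cpucap number 1 is an assumed value.

  /* Editor's sketch only: decode a cpufeature field that may have
   * ARM64_CB_BIT set. Not kernel code. */
  #include <stdint.h>
  #include <stdio.h>

  #define ARM64_CB_BIT (1U << 15)

  int main(void)
  {
      /* Assume ARM64_ALWAYS_SYSTEM resolved to cpucap number 1. */
      uint16_t feature = ARM64_CB_BIT | 1;

      unsigned int cap = feature & ~ARM64_CB_BIT; /* what ALT_CAP() extracts */
      int is_cb = !!(feature & ARM64_CB_BIT);     /* what ALT_HAS_CB() tests */

      printf("cap=%u callback=%d\n", cap, is_cb); /* prints: cap=1 callback=1 */
      return 0;
  }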
All existing callback alternatives are made to use ARM64_ALWAYS_SYSTEM, and so will be patched at the same point during the boot flow as before. Subsequent patches will make more use of these new cpucaps. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Joey Gouly Cc: Marc Zyngier Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220912162210.3626215-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative-macros.h | 18 ++++++++++++------ arch/arm64/include/asm/assembler.h | 10 +++++----- arch/arm64/include/asm/cpufeature.h | 4 +--- arch/arm64/include/asm/kvm_mmu.h | 5 +++-- arch/arm64/kernel/alternative.c | 26 ++++++++++++++------------ arch/arm64/kernel/cpufeature.c | 19 +++++++++++++++++-- arch/arm64/kernel/entry.S | 8 ++++---- arch/arm64/kvm/hyp/hyp-entry.S | 4 ++-- arch/arm64/tools/cpucaps | 2 ++ 9 files changed, 60 insertions(+), 36 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 7e157ab6cd50..189c31be163c 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -2,10 +2,16 @@ #ifndef __ASM_ALTERNATIVE_MACROS_H #define __ASM_ALTERNATIVE_MACROS_H +#include + #include #include -#define ARM64_CB_PATCH ARM64_NCAPS +#define ARM64_CB_BIT (UL(1) << 15) + +#if ARM64_NCAPS >= ARM64_CB_BIT +#error "cpucaps have overflown ARM64_CB_BIT" +#endif #ifndef __ASSEMBLY__ @@ -73,8 +79,8 @@ #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) -#define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) +#define ALTERNATIVE_CB(oldinstr, feature, cb) \ + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_BIT | (feature), 1, cb) #else #include @@ -82,7 +88,7 @@ .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len .word \orig_offset - . .word \alt_offset - . 
- .hword \feature + .hword (\feature) .byte \orig_len .byte \alt_len .endm @@ -141,10 +147,10 @@ 661: .endm -.macro alternative_cb cb +.macro alternative_cb cap, cb .set .Lasm_alt_mode, 0 .pushsection .altinstructions, "a" - altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 + altinstruction_entry 661f, \cb, ARM64_CB_BIT | \cap, 662f-661f, 0 .popsection 661: .endm diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5846145be523..d851e2733439 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -293,7 +293,7 @@ alternative_endif alternative_if_not ARM64_KVM_PROTECTED_MODE ASM_BUG() alternative_else_nop_endif -alternative_cb kvm_compute_final_ctr_el0 +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0 movz \reg, #0 movk \reg, #0, lsl #16 movk \reg, #0, lsl #32 @@ -877,7 +877,7 @@ alternative_endif .macro __mitigate_spectre_bhb_loop tmp #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_loop_iter +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter mov \tmp, #32 // Patched to correct the immediate alternative_cb_end .Lspectre_bhb_loop\@: @@ -890,7 +890,7 @@ alternative_cb_end .macro mitigate_spectre_bhb_loop tmp #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_loop_mitigation_enable +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable b .L_spectre_bhb_loop_done\@ // Patched to NOP alternative_cb_end __mitigate_spectre_bhb_loop \tmp @@ -904,7 +904,7 @@ alternative_cb_end stp x0, x1, [sp, #-16]! stp x2, x3, [sp, #-16]! mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 -alternative_cb smccc_patch_fw_mitigation_conduit +alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end ldp x2, x3, [sp], #16 @@ -914,7 +914,7 @@ alternative_cb_end .macro mitigate_spectre_bhb_clear_insn #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -alternative_cb spectre_bhb_patch_clearbhb +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb /* Patched to NOP when not supported */ clearbhb isb diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 630c337c7774..f5852ad6e884 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -422,9 +422,7 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; -/* ARM64 CAPS + alternative_cb */ -#define ARM64_NPATCHABLE (ARM64_NCAPS + 1) -extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); +extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); #define for_each_available_cap(cap) \ for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b208da3bebec..7784081088e7 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -63,7 +63,7 @@ * specific registers encoded in the instructions). */ .macro kern_hyp_va reg -alternative_cb kvm_update_va_mask +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask and \reg, \reg, #1 /* mask with va_mask */ ror \reg, \reg, #1 /* rotate to the first tag bit */ add \reg, \reg, #0 /* insert the low 12 bits of the tag */ @@ -97,7 +97,7 @@ alternative_cb_end hyp_pa \reg, \tmp /* Load kimage_voffset. 
*/ -alternative_cb kvm_get_kimage_voffset +alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset movz \tmp, #0 movk \tmp, #0, lsl #16 movk \tmp, #0, lsl #32 @@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) "add %0, %0, #0\n" "add %0, %0, #0, lsl 12\n" "ror %0, %0, #63\n", + ARM64_ALWAYS_SYSTEM, kvm_update_va_mask) : "+r" (v)); return v; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 2e18c9c0f612..9a071a5fcb67 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -21,6 +21,9 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) +#define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT) +#define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT) + /* Volatile, as we may be patching the guts of READ_ONCE() */ static volatile int all_alternatives_applied; @@ -143,16 +146,15 @@ static void __nocfi __apply_alternatives(const struct alt_region *region, for (alt = region->begin; alt < region->end; alt++) { int nr_inst; + int cap = ALT_CAP(alt); - if (!test_bit(alt->cpufeature, feature_mask)) + if (!test_bit(cap, feature_mask)) continue; - /* Use ARM64_CB_PATCH as an unconditional patch */ - if (alt->cpufeature < ARM64_CB_PATCH && - !cpus_have_cap(alt->cpufeature)) + if (!cpus_have_cap(cap)) continue; - if (alt->cpufeature == ARM64_CB_PATCH) + if (ALT_HAS_CB(alt)) BUG_ON(alt->alt_len != 0); else BUG_ON(alt->alt_len != alt->orig_len); @@ -161,10 +163,10 @@ static void __nocfi __apply_alternatives(const struct alt_region *region, updptr = is_module ? origptr : lm_alias(origptr); nr_inst = alt->orig_len / AARCH64_INSN_SIZE; - if (alt->cpufeature < ARM64_CB_PATCH) - alt_cb = patch_alternative; - else + if (ALT_HAS_CB(alt)) alt_cb = ALT_REPL_PTR(alt); + else + alt_cb = patch_alternative; alt_cb(alt, origptr, updptr, nr_inst); @@ -208,10 +210,10 @@ static int __apply_alternatives_multi_stop(void *unused) cpu_relax(); isb(); } else { - DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE); + DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS); bitmap_complement(remaining_capabilities, boot_capabilities, - ARM64_NPATCHABLE); + ARM64_NCAPS); BUG_ON(all_alternatives_applied); __apply_alternatives(&kernel_alternatives, false, @@ -254,9 +256,9 @@ void apply_alternatives_module(void *start, size_t length) .begin = start, .end = start + length, }; - DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + DECLARE_BITMAP(all_capabilities, ARM64_NCAPS); - bitmap_fill(all_capabilities, ARM64_NPATCHABLE); + bitmap_fill(all_capabilities, ARM64_NCAPS); __apply_alternatives(®ion, true, &all_capabilities[0]); } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af4de817d712..68a0545285a1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; -/* Need also bit for ARM64_CB_PATCH */ -DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); +DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS); bool arm64_use_ng_mappings = false; EXPORT_SYMBOL(arm64_use_ng_mappings); @@ -1391,6 +1390,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id) #include +static bool +has_always(const struct arm64_cpu_capabilities *entry, int scope) +{ + return true; +} + static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) { @@ -2087,6 +2092,16 @@ 
cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) } static const struct arm64_cpu_capabilities arm64_features[] = { + { + .capability = ARM64_ALWAYS_BOOT, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_always, + }, + { + .capability = ARM64_ALWAYS_SYSTEM, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_always, + }, { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 2d73b3e793b2..e28137d64b76 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -114,7 +114,7 @@ * them if required. */ .macro apply_ssbd, state, tmp1, tmp2 -alternative_cb spectre_v4_patch_fw_mitigation_enable +alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable b .L__asm_ssbd_skip\@ // Patched to NOP alternative_cb_end ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 @@ -123,7 +123,7 @@ alternative_cb_end tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state -alternative_cb smccc_patch_fw_mitigation_conduit +alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end .L__asm_ssbd_skip\@: @@ -175,7 +175,7 @@ alternative_else_nop_endif .macro mte_set_kernel_gcr, tmp, tmp2 #ifdef CONFIG_KASAN_HW_TAGS -alternative_cb kasan_hw_tags_enable +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable b 1f alternative_cb_end mov \tmp, KERNEL_GCR_EL1 @@ -186,7 +186,7 @@ alternative_cb_end .macro mte_set_user_gcr, tsk, tmp, tmp2 #ifdef CONFIG_KASAN_HW_TAGS -alternative_cb kasan_hw_tags_enable +alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable b 1f alternative_cb_end ldr \tmp, [\tsk, #THREAD_MTE_CTRL] diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 7839d075729b..8f3f93fa119e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -196,7 +196,7 @@ SYM_CODE_END(__kvm_hyp_vector) sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] stp x0, x1, [sp, #(8 * 2)] - alternative_cb spectre_bhb_patch_wa3 + alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3 /* Patched to mov WA3 when supported */ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 alternative_cb_end @@ -216,7 +216,7 @@ SYM_CODE_END(__kvm_hyp_vector) mitigate_spectre_bhb_clear_insn .endif .if \indirect != 0 - alternative_cb kvm_patch_vector_branch + alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch /* * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with: * diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 63b2484ce6c3..ff882ec175e7 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -2,6 +2,8 @@ # # Internal CPU capabilities constants, keep this list sorted +ALWAYS_BOOT +ALWAYS_SYSTEM BTI # Unreliable: use system_supports_32bit_el0() instead. HAS_32BIT_EL0_DO_NOT_USE -- cgit v1.2.3
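As a closing editor's note (not part of the series), the dispatch that __apply_alternatives() ends up with can be sketched in standalone C: every entry now names a real cpucap, and callbacks are flagged by ARM64_CB_BIT rather than the old ARM64_CB_PATCH special case. The cap numbers below are made up for illustration.

  /* Editor's sketch only: model the post-series selection logic.
   * Not kernel code. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define ARM64_CB_BIT (1U << 15)

  struct alt {
      uint16_t cpufeature; /* cap number, optionally ORed with ARM64_CB_BIT */
  };

  /* Skip entries whose cap was not detected, then pick either the
   * callback or the default copy-the-replacement patcher. */
  static void apply(const struct alt *a, const bool *caps)
  {
      unsigned int cap = a->cpufeature & ~ARM64_CB_BIT; /* ALT_CAP() */

      if (!caps[cap])
          return; /* cap not detected: keep the original instructions */

      if (a->cpufeature & ARM64_CB_BIT) /* ALT_HAS_CB() */
          printf("cap %u: invoke callback patcher\n", cap);
      else
          printf("cap %u: copy replacement instructions\n", cap);
  }

  int main(void)
  {
      bool caps[ARM64_CB_BIT] = { [1] = true }; /* pretend cap 1 was detected */
      struct alt cb = { .cpufeature = ARM64_CB_BIT | 1 };
      struct alt plain = { .cpufeature = 2 }; /* cap 2 was not detected */

      apply(&cb, caps);    /* prints: cap 1: invoke callback patcher */
      apply(&plain, caps); /* prints nothing */
      return 0;
  }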