Diffstat (limited to 'arch/arm64/kernel/cpufeature.c')
-rw-r--r--   arch/arm64/kernel/cpufeature.c   314
1 file changed, 230 insertions(+), 84 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af50064dea51..4f272399de89 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -52,6 +52,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
 
 /*
  * Flag to indicate if we have computed the system wide
@@ -141,9 +142,18 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
         ARM64_FTR_END,
 };
@@ -518,6 +528,29 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 }
 
 extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+        for (; caps->matches; caps++) {
+                if (WARN(caps->capability >= ARM64_NCAPS,
+                        "Invalid capability %d\n", caps->capability))
+                        continue;
+                if (WARN(cpu_hwcaps_ptrs[caps->capability],
+                        "Duplicate entry for capability %d\n",
+                        caps->capability))
+                        continue;
+                cpu_hwcaps_ptrs[caps->capability] = caps;
+        }
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+        init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+        init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
 static void __init setup_boot_cpu_capabilities(void);
 
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
@@ -564,6 +597,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
         }
 
         /*
+         * Initialize the indirect array of CPU hwcaps capabilities pointers
+         * before we handle the boot CPU below.
+         */
+        init_cpu_hwcaps_indirect_list();
+
+        /*
         * Detect and enable early CPU capabilities based on the boot CPU,
         * after we have initialised the CPU feature infrastructure.
         */
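Note on the hunks above: cpu_hwcaps_ptrs turns capability lookup into an O(1) array index, where the old scheme re-scanned the sentinel-terminated arm64_features and arm64_errata arrays. A minimal sketch of the access pattern this enables (the helper name is illustrative, not part of the patch):

static const struct arm64_cpu_capabilities *cap_lookup(unsigned int n)
{
        /* Slots are populated once at boot by init_cpu_hwcaps_indirect_list() */
        return (n < ARM64_NCAPS) ? cpu_hwcaps_ptrs[n] : NULL;
}

The two WARN()s in init_cpu_hwcaps_indirect_list_from_array() guard exactly the invariants this relies on: every capability number fits in the table, and no number is claimed twice across the two source arrays.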
@@ -915,6 +954,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
         static const struct midr_range kpti_safe_list[] = {
                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                 { /* sentinel */ }
         };
         char const *str = "command line option";
@@ -1145,6 +1190,14 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 }
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
+#ifdef CONFIG_ARM64_PTR_AUTH
+static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+{
+        sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+                                       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
         {
                 .desc = "GIC system register CPU interface",
@@ -1333,7 +1386,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .cpu_enable = cpu_enable_hw_dbm,
         },
 #endif
-#ifdef CONFIG_ARM64_SSBD
         {
                 .desc = "CRC32 instructions",
                 .capability = ARM64_HAS_CRC32,
@@ -1343,6 +1395,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
                 .min_field_value = 1,
         },
+#ifdef CONFIG_ARM64_SSBD
         {
                 .desc = "Speculative Store Bypassing Safe (SSBS)",
                 .capability = ARM64_SSBS,
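cpu_enable_address_auth() above sets the four SCTLR_EL1 enable bits for the instruction (EnIA/EnIB) and data (EnDA/EnDB) pointer-authentication keys. sysreg_clear_set() is a read-modify-write helper; its effect here is roughly the following sketch (hedged; the real helper also skips the write when the value is unchanged):

        u64 val = read_sysreg(sctlr_el1);

        /* The clear mask passed in is 0, so this only ORs in the enable bits */
        val |= SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;
        write_sysreg(val, sctlr_el1);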
@@ -1368,22 +1421,115 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .cpu_enable = cpu_enable_cnp,
         },
 #endif
+        {
+                .desc = "Speculation barrier (SB)",
+                .capability = ARM64_HAS_SB,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .matches = has_cpuid_feature,
+                .sys_reg = SYS_ID_AA64ISAR1_EL1,
+                .field_pos = ID_AA64ISAR1_SB_SHIFT,
+                .sign = FTR_UNSIGNED,
+                .min_field_value = 1,
+        },
+#ifdef CONFIG_ARM64_PTR_AUTH
+        {
+                .desc = "Address authentication (architected algorithm)",
+                .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .sys_reg = SYS_ID_AA64ISAR1_EL1,
+                .sign = FTR_UNSIGNED,
+                .field_pos = ID_AA64ISAR1_APA_SHIFT,
+                .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+                .matches = has_cpuid_feature,
+                .cpu_enable = cpu_enable_address_auth,
+        },
+        {
+                .desc = "Address authentication (IMP DEF algorithm)",
+                .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .sys_reg = SYS_ID_AA64ISAR1_EL1,
+                .sign = FTR_UNSIGNED,
+                .field_pos = ID_AA64ISAR1_API_SHIFT,
+                .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+                .matches = has_cpuid_feature,
+                .cpu_enable = cpu_enable_address_auth,
+        },
+        {
+                .desc = "Generic authentication (architected algorithm)",
+                .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .sys_reg = SYS_ID_AA64ISAR1_EL1,
+                .sign = FTR_UNSIGNED,
+                .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+                .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+                .matches = has_cpuid_feature,
+        },
+        {
+                .desc = "Generic authentication (IMP DEF algorithm)",
+                .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .sys_reg = SYS_ID_AA64ISAR1_EL1,
+                .sign = FTR_UNSIGNED,
+                .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+                .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+                .matches = has_cpuid_feature,
+        },
+#endif /* CONFIG_ARM64_PTR_AUTH */
         {},
 };
 
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)     \
-        {                                                       \
-                .desc = #cap,                                   \
-                .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
-                .matches = has_cpuid_feature,                   \
-                .sys_reg = reg,                                 \
-                .field_pos = field,                             \
-                .sign = s,                                      \
-                .min_field_value = min_value,                   \
-                .hwcap_type = cap_type,                         \
-                .hwcap = cap,                                   \
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value)             \
+                .matches = has_cpuid_feature,                   \
+                .sys_reg = reg,                                 \
+                .field_pos = field,                             \
+                .sign = s,                                      \
+                .min_field_value = min_value,
+
+#define __HWCAP_CAP(name, cap_type, cap)                        \
+                .desc = name,                                   \
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
+                .hwcap_type = cap_type,                         \
+                .hwcap = cap,                                   \
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)      \
+        {                                                       \
+                __HWCAP_CAP(#cap, cap_type, cap)                \
+                HWCAP_CPUID_MATCH(reg, field, s, min_value)     \
+        }
+
+#define HWCAP_MULTI_CAP(list, cap_type, cap)                    \
+        {                                                       \
+                __HWCAP_CAP(#cap, cap_type, cap)                \
+                .matches = cpucap_multi_entry_cap_matches,      \
+                .match_list = list,                             \
         }
 
+#ifdef CONFIG_ARM64_PTR_AUTH
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+        {
+                HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+                                  FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
+        },
+        {
+                HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
+                                  FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+        },
+        {},
+};
+
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+        {
+                HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
+                                  FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+        },
+        {
+                HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
+                                  FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+        },
+        {},
+};
+#endif
+
 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
@@ -1409,11 +1555,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
         HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
 #endif
         HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+#ifdef CONFIG_ARM64_PTR_AUTH
+        HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
+        HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
+#endif
         {},
 };
 
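The HWCAP_MULTI_CAP entries exist because a single ELF hwcap (HWCAP_PACA, HWCAP_PACG) can be satisfied by either the architected or the IMP DEF algorithm field of ID_AA64ISAR1_EL1. The .matches callback, cpucap_multi_entry_cap_matches, succeeds when any entry of .match_list matches; its semantics are roughly this sketch (the real function lives in the cpufeature headers, the body here is a paraphrase):

static bool any_entry_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_cpu_capabilities *caps;

        /* match_list is sentinel-terminated, like the main capability arrays */
        for (caps = entry->match_list; caps->matches; caps++)
                if (caps->matches(caps, scope))
                        return true;

        return false;
}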
@@ -1482,52 +1633,46 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
                 cap_set_elf_hwcap(hwcaps);
 }
 
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
-                               unsigned int cap)
+static void update_cpu_capabilities(u16 scope_mask)
 {
+        int i;
         const struct arm64_cpu_capabilities *caps;
 
-        if (WARN_ON(preemptible()))
-                return false;
-
-        for (caps = cap_array; caps->matches; caps++)
-                if (caps->capability == cap)
-                        return caps->matches(caps, SCOPE_LOCAL_CPU);
-
-        return false;
-}
-
-static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
-                                      u16 scope_mask, const char *info)
-{
         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
-        for (; caps->matches; caps++) {
-                if (!(caps->type & scope_mask) ||
+        for (i = 0; i < ARM64_NCAPS; i++) {
+                caps = cpu_hwcaps_ptrs[i];
+                if (!caps || !(caps->type & scope_mask) ||
+                    cpus_have_cap(caps->capability) ||
                     !caps->matches(caps, cpucap_default_scope(caps)))
                         continue;
 
-                if (!cpus_have_cap(caps->capability) && caps->desc)
-                        pr_info("%s %s\n", info, caps->desc);
+                if (caps->desc)
+                        pr_info("detected: %s\n", caps->desc);
                 cpus_set_cap(caps->capability);
         }
 }
 
-static void update_cpu_capabilities(u16 scope_mask)
+/*
+ * Enable all the available capabilities on this CPU. The capabilities
+ * with BOOT_CPU scope are handled separately and hence skipped here.
+ */
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
 {
-        __update_cpu_capabilities(arm64_errata, scope_mask,
-                                  "enabling workaround for");
-        __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
-}
+        int i;
+        u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
 
-static int __enable_cpu_capability(void *arg)
-{
-        const struct arm64_cpu_capabilities *cap = arg;
+        for_each_available_cap(i) {
+                const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
+
+                if (WARN_ON(!cap))
+                        continue;
 
-        cap->cpu_enable(cap);
+                if (!(cap->type & non_boot_scope))
+                        continue;
+
+                if (cap->cpu_enable)
+                        cap->cpu_enable(cap);
+        }
         return 0;
 }
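cpu_enable_non_boot_scope_capabilities() above runs on each online CPU via stop_machine() (see the next hunk) and only visits capabilities that detection has already recorded in the cpu_hwcaps bitmap. Assuming for_each_available_cap() keeps its usual definition as a walk over that bitmap's set bits, roughly:

#define for_each_available_cap(cap)             \
        for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)

the WARN_ON(!cap) can only fire if a bit was set without a corresponding entry having been registered in cpu_hwcaps_ptrs.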
@@ -1535,21 +1680,29 @@ static int __enable_cpu_capability(void *arg)
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
  */
-static void __init
-__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
-                          u16 scope_mask)
+static void __init enable_cpu_capabilities(u16 scope_mask)
 {
+        int i;
+        const struct arm64_cpu_capabilities *caps;
+        bool boot_scope;
+
         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
-        for (; caps->matches; caps++) {
-                unsigned int num = caps->capability;
+        boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
 
-                if (!(caps->type & scope_mask) || !cpus_have_cap(num))
+        for (i = 0; i < ARM64_NCAPS; i++) {
+                unsigned int num;
+
+                caps = cpu_hwcaps_ptrs[i];
+                if (!caps || !(caps->type & scope_mask))
+                        continue;
+                num = caps->capability;
+                if (!cpus_have_cap(num))
                         continue;
 
                 /* Ensure cpus_have_const_cap(num) works */
                 static_branch_enable(&cpu_hwcap_keys[num]);
 
-                if (caps->cpu_enable) {
+                if (boot_scope && caps->cpu_enable)
                         /*
                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
                          * before any secondary CPU boots. Thus, each secondary
@@ -1558,25 +1711,19 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                          * the boot CPU, for which the capability must be
                          * enabled here. This approach avoids costly
                          * stop_machine() calls for this case.
-                         *
-                         * Otherwise, use stop_machine() as it schedules the
-                         * work allowing us to modify PSTATE, instead of
-                         * on_each_cpu() which uses an IPI, giving us a PSTATE
-                         * that disappears when we return.
                          */
-                        if (scope_mask & SCOPE_BOOT_CPU)
-                                caps->cpu_enable(caps);
-                        else
-                                stop_machine(__enable_cpu_capability,
-                                             (void *)caps, cpu_online_mask);
-                }
+                        caps->cpu_enable(caps);
         }
-}
 
-static void __init enable_cpu_capabilities(u16 scope_mask)
-{
-        __enable_cpu_capabilities(arm64_errata, scope_mask);
-        __enable_cpu_capabilities(arm64_features, scope_mask);
+        /*
+         * For all non-boot scope capabilities, use stop_machine()
+         * as it schedules the work allowing us to modify PSTATE,
+         * instead of on_each_cpu() which uses an IPI, giving us a
+         * PSTATE that disappears when we return.
+         */
+        if (!boot_scope)
+                stop_machine(cpu_enable_non_boot_scope_capabilities,
+                             NULL, cpu_online_mask);
 }
 
 /*
@@ -1586,16 +1733,17 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
  *
  * Returns "false" on conflicts.
  */
-static bool
-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
-                        u16 scope_mask)
+static bool verify_local_cpu_caps(u16 scope_mask)
 {
+        int i;
         bool cpu_has_cap, system_has_cap;
+        const struct arm64_cpu_capabilities *caps;
 
         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
 
-        for (; caps->matches; caps++) {
-                if (!(caps->type & scope_mask))
+        for (i = 0; i < ARM64_NCAPS; i++) {
+                caps = cpu_hwcaps_ptrs[i];
+                if (!caps || !(caps->type & scope_mask))
                         continue;
 
                 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
@@ -1626,7 +1774,7 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
                 }
         }
 
-        if (caps->matches) {
+        if (i < ARM64_NCAPS) {
                 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
                         smp_processor_id(), caps->capability, caps->desc,
                         system_has_cap, cpu_has_cap);
@@ -1636,12 +1784,6 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
         return true;
 }
 
-static bool verify_local_cpu_caps(u16 scope_mask)
-{
-        return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
-               __verify_local_cpu_caps(arm64_features, scope_mask);
-}
-
 /*
  * Check for CPU features that are used in early boot
  * based on the Boot CPU value.
@@ -1750,12 +1892,16 @@ static void __init mark_const_caps_ready(void)
         static_branch_enable(&arm64_const_caps_ready);
 }
 
-extern const struct arm64_cpu_capabilities arm64_errata[];
-
-bool this_cpu_has_cap(unsigned int cap)
+bool this_cpu_has_cap(unsigned int n)
 {
-        return (__this_cpu_has_cap(arm64_features, cap) ||
-                __this_cpu_has_cap(arm64_errata, cap));
+        if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
+                const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+                if (cap)
+                        return cap->matches(cap, SCOPE_LOCAL_CPU);
+        }
+
+        return false;
 }
 
 static void __init setup_system_capabilities(void)
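Once the HWCAP_PACA/HWCAP_PACG bits from this patch reach the ELF auxiliary vector, userspace can detect pointer authentication without reading ID registers. A minimal probe, assuming a libc that provides getauxval() and a uapi <asm/hwcap.h> that defines both bits:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        /* PACA: address authentication; PACG: generic (PACGA) authentication */
        printf("paca: %s\n", (hwcaps & HWCAP_PACA) ? "yes" : "no");
        printf("pacg: %s\n", (hwcaps & HWCAP_PACG) ? "yes" : "no");
        return 0;
}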