Diffstat (limited to 'arch/arm64/include/asm')
 arch/arm64/include/asm/alternative-macros.h | 66
 arch/arm64/include/asm/assembler.h          | 10
 arch/arm64/include/asm/cpufeature.h         | 15
 arch/arm64/include/asm/kvm_mmu.h            |  5
 arch/arm64/include/asm/lse.h                |  5
 5 files changed, 76 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 7e157ab6cd50..b5ac0b85c9bb 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -2,10 +2,22 @@
 #ifndef __ASM_ALTERNATIVE_MACROS_H
 #define __ASM_ALTERNATIVE_MACROS_H
 
+#include <linux/bits.h>
+#include <linux/const.h>
+
 #include <asm/cpucaps.h>
 #include <asm/insn-def.h>
 
-#define ARM64_CB_PATCH ARM64_NCAPS
+/*
+ * Binutils 2.27.0 can't handle a 'UL' suffix on constants, so for the assembly
+ * macros below we must use `(1 << ARM64_CB_SHIFT)`.
+ */
+#define ARM64_CB_SHIFT	15
+#define ARM64_CB_BIT	BIT(ARM64_CB_SHIFT)
+
+#if ARM64_NCAPS >= ARM64_CB_BIT
+#error "cpucaps have overflown ARM64_CB_BIT"
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -73,8 +85,8 @@
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
 	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
-#define ALTERNATIVE_CB(oldinstr, cb) \
-	__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
+#define ALTERNATIVE_CB(oldinstr, feature, cb) \
+	__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
 #else
 
 #include <asm/assembler.h>
@@ -82,7 +94,7 @@
 .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
 	.word \orig_offset - .
 	.word \alt_offset - .
-	.hword \feature
+	.hword (\feature)
 	.byte \orig_len
 	.byte \alt_len
 .endm
@@ -141,10 +153,10 @@
 661:
 .endm
 
-.macro alternative_cb cb
+.macro alternative_cb cap, cb
 	.set .Lasm_alt_mode, 0
 	.pushsection .altinstructions, "a"
-	altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+	altinstruction_entry 661f, \cb, (1 << ARM64_CB_SHIFT) | \cap, 662f-661f, 0
 	.popsection
 661:
 .endm
@@ -207,4 +219,46 @@ alternative_endif
 #define ALTERNATIVE(oldinstr, newinstr, ...)	\
 	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static __always_inline bool
+alternative_has_feature_likely(unsigned long feature)
+{
+	compiletime_assert(feature < ARM64_NCAPS,
+			   "feature must be < ARM64_NCAPS");
+
+	asm_volatile_goto(
+	ALTERNATIVE_CB("b	%l[l_no]", %[feature], alt_cb_patch_nops)
+	:
+	: [feature] "i" (feature)
+	:
+	: l_no);
+
+	return true;
+l_no:
+	return false;
+}
+
+static __always_inline bool
+alternative_has_feature_unlikely(unsigned long feature)
+{
+	compiletime_assert(feature < ARM64_NCAPS,
+			   "feature must be < ARM64_NCAPS");
+
+	asm_volatile_goto(
+	ALTERNATIVE("nop", "b	%l[l_yes]", %[feature])
+	:
+	: [feature] "i" (feature)
+	:
+	: l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ALTERNATIVE_MACROS_H */
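Note on the encoding above: the 16-bit feature field of each alt_instr now carries a callback flag in bit 15 (ARM64_CB_BIT), with the cpucap in the low 15 bits, which is why ARM64_NCAPS must stay below ARM64_CB_BIT. A standalone sketch of the decode, illustrative only and not part of the patch:

	/* Sketch: decoding the combined cpucap/callback field. */
	#include <stdbool.h>
	#include <stdint.h>

	#define ARM64_CB_SHIFT	15
	#define ARM64_CB_BIT	(UINT16_C(1) << ARM64_CB_SHIFT)

	static bool alt_is_callback(uint16_t field)
	{
		return field & ARM64_CB_BIT;		/* bit 15 set: callback alternative */
	}

	static uint16_t alt_cpucap(uint16_t field)
	{
		return field & (ARM64_CB_BIT - 1);	/* low 15 bits hold the cpucap */
	}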
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index cf8e72e733de..e5957a53be39 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -293,7 +293,7 @@ alternative_endif
 alternative_if_not ARM64_KVM_PROTECTED_MODE
 	ASM_BUG()
 alternative_else_nop_endif
-alternative_cb kvm_compute_final_ctr_el0
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
 	movz	\reg, #0
 	movk	\reg, #0, lsl #16
 	movk	\reg, #0, lsl #32
@@ -877,7 +877,7 @@ alternative_endif
 
 	.macro __mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb spectre_bhb_patch_loop_iter
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
 	mov	\tmp, #32		// Patched to correct the immediate
 alternative_cb_end
 .Lspectre_bhb_loop\@:
@@ -890,7 +890,7 @@ alternative_cb_end
 
 	.macro mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb spectre_bhb_patch_loop_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
 	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
 alternative_cb_end
 	__mitigate_spectre_bhb_loop	\tmp
@@ -904,7 +904,7 @@ alternative_cb_end
 	stp	x0, x1, [sp, #-16]!
 	stp	x2, x3, [sp, #-16]!
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
-alternative_cb smccc_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 	ldp	x2, x3, [sp], #16
@@ -914,7 +914,7 @@ alternative_cb_end
 
 	.macro mitigate_spectre_bhb_clear_insn
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb spectre_bhb_patch_clearbhb
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
 	/* Patched to NOP when not supported */
 	clearbhb
 	isb
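Every alternative_cb site above now passes ARM64_ALWAYS_SYSTEM, a cap that (as the name suggests) is always set system-wide, so these sequences keep their unconditional patch-time callbacks under the new encoding. The named callbacks follow the alternative_cb_t prototype from <asm/alternative.h>; a minimal, purely illustrative callback (the name and body below are hypothetical, not from this patch) would look like:

	/*
	 * Hypothetical callback: keeps the original instructions by
	 * copying them through to the updated location unchanged.
	 */
	static void __init my_patch_nothing(struct alt_instr *alt,
					    __le32 *origptr, __le32 *updptr,
					    int nr_inst)
	{
		int i;

		for (i = 0; i < nr_inst; i++)
			updptr[i] = origptr[i];
	}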
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a08672286b4c..f73f11b55042 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -6,6 +6,7 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
+#include <asm/alternative-macros.h>
 #include <asm/cpucaps.h>
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
@@ -419,12 +420,8 @@ static __always_inline bool is_hyp_code(void)
 }
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
 
-/* ARM64 CAPS + alternative_cb */
-#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
-extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
 
 #define for_each_available_cap(cap)	\
 	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
@@ -440,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 
 static __always_inline bool system_capabilities_finalized(void)
 {
-	return static_branch_likely(&arm64_const_caps_ready);
+	return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
 }
 
 /*
@@ -448,11 +445,11 @@ static __always_inline bool system_capabilities_finalized(void)
  *
  * Before the capability is detected, this returns false.
  */
-static inline bool cpus_have_cap(unsigned int num)
+static __always_inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return test_bit(num, cpu_hwcaps);
+	return arch_test_bit(num, cpu_hwcaps);
 }
 
 /*
@@ -467,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+	return alternative_has_feature_unlikely(num);
 }
 
 /*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b208da3bebec..7784081088e7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -63,7 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va	reg
-alternative_cb kvm_update_va_mask
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
 	and     \reg, \reg, #1		/* mask with va_mask */
 	ror     \reg, \reg, #1		/* rotate to the first tag bit */
 	add     \reg, \reg, #0		/* insert the low 12 bits of the tag */
@@ -97,7 +97,7 @@ alternative_cb_end
 	hyp_pa	\reg, \tmp
 
 	/* Load kimage_voffset. */
-alternative_cb kvm_get_kimage_voffset
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
 	movz	\tmp, #0
 	movk	\tmp, #0, lsl #16
 	movk	\tmp, #0, lsl #32
@@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 				    "add %0, %0, #0\n"
 				    "add %0, %0, #0, lsl 12\n"
 				    "ror %0, %0, #63\n",
+				    ARM64_ALWAYS_SYSTEM,
 				    kvm_update_va_mask)
 		     : "+r" (v));
 	return v;
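With the per-cap static keys gone, a constant cap check compiles down to a single NOP/branch that gets patched when alternatives are applied. A hypothetical caller of the new unlikely helper (both the cap name and the helper below are made up for illustration):

	/*
	 * Hypothetical: the branch is a NOP until alternatives are
	 * applied, then patched taken/not-taken for this cap.
	 */
	static inline void apply_errata_fixup_if_needed(void)
	{
		if (alternative_has_feature_unlikely(ARM64_WORKAROUND_EXAMPLE))
			do_errata_fixup();	/* hypothetical helper */
	}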
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 29c85810ae69..c503db8e73b0 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -13,14 +13,13 @@
 
 #include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
+#include <asm/alternative-macros.h>
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-
 static __always_inline bool system_uses_lse_atomics(void)
 {
-	return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+	return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
}
 
 #define __lse_ll_sc_body(op, ...)					\
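system_uses_lse_atomics() feeds the __lse_ll_sc_body() dispatcher whose opening line appears as trailing context above. Its body lies outside this hunk, but assuming it selects between the LSE and LL/SC implementations (per the __lse_*/__ll_sc_* naming in atomic_lse.h and atomic_ll_sc.h), the shape is roughly:

	/*
	 * Sketch, not the verbatim kernel macro: pick the LSE or LL/SC
	 * implementation of an atomic op behind the patched branch.
	 */
	#define __lse_ll_sc_body(op, ...)				\
	({								\
		system_uses_lse_atomics() ?				\
			__lse_##op(__VA_ARGS__) :			\
			__ll_sc_##op(__VA_ARGS__);			\
	})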