From 3ff047f6971d3c3aefd1b8972ac94a4301a70902 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:48 +0530 Subject: arm64: cpufeature: Fix meta-capability cpufeature check Some existing/future meta cpucaps match need the presence of individual cpucaps. Currently the individual cpucaps checks it via an array based flag and this introduces dependency on the array entry order. This limitation exists only for system scope cpufeature. This patch introduces an internal helper function (__system_matches_cap) to invoke the matching handler for system scope. This helper has to be used during a narrow window when, - The system wide safe registers are set with all the SMP CPUs and, - The SYSTEM_FEATURE cpu_hwcaps may not have been set. Normal users should use the existing cpus_have_{const_}cap() global function. Suggested-by: Suzuki K Poulose Suggested-by: Catalin Marinas Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0b6715625cf6..4f2e95e6ecd1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); +static bool __system_matches_cap(unsigned int n); + /* * NOTE: Any changes to the visibility of features should be kept in * sync with the documentation of the CPU feature register ABI. @@ -2146,6 +2148,23 @@ bool this_cpu_has_cap(unsigned int n) return false; } +/* + * This helper function is used in a narrow window when, + * - The system wide safe registers are set with all the SMP CPUs and, + * - The SYSTEM_FEATURE cpu_hwcaps may not have been set. + * In all other cases cpus_have_{const_}cap() should be used. + */ +static bool __system_matches_cap(unsigned int n) +{ + if (n < ARM64_NCAPS) { + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; + + if (cap) + return cap->matches(cap, SCOPE_SYSTEM); + } + return false; +} + void cpu_set_feature(unsigned int num) { WARN_ON(num >= MAX_CPU_FEATURES); @@ -2218,7 +2237,7 @@ void __init setup_cpu_features(void) static bool __maybe_unused cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) { - return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO)); + return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO)); } static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) -- cgit v1.2.3 From cfef06bd0686a578aa53e039c9aec0b1a5581d3b Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:49 +0530 Subject: arm64: cpufeature: add pointer auth meta-capabilities To enable pointer auth for the kernel, we're going to need to check for the presence of address auth and generic auth using alternative_if. We currently have two cpucaps for each, but alternative_if needs to check a single cpucap. So define meta-capabilities that are present when either of the current two capabilities is present. Leave the existing four cpucaps in place, as they are still needed to check for mismatched systems where one CPU has the architected algorithm but another has the IMP DEF algorithm. 
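Concretely, each meta-capability is a plain system-scope entry whose .matches callback ORs the two underlying cpucaps. A simplified sketch of the address auth case added below (it uses the __system_matches_cap() helper from the previous patch rather than cpus_have_const_cap(), because the hwcaps for the individual capabilities may not have been set yet when this entry is matched):

	static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
				     int __unused)
	{
		/* Present if either algorithm (architected or IMP DEF) is. */
		return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
		       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
	}
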
Note, the meta-capabilities were present before but were removed in commit a56005d32105 ("arm64: cpufeature: Reduce number of pointer auth CPU caps from 6 to 4") and commit 1e013d06120c ("arm64: cpufeature: Rework ptr auth hwcaps using multi_entry_cap_matches"), as they were not needed then. Note, unlike before, the current patch checks the cpucap values directly, instead of reading the CPU ID register value. Reviewed-by: Suzuki K Poulose Reviewed-by: Kees Cook Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: commit message and macro rebase, use __system_matches_cap] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpucaps.h | 4 +++- arch/arm64/include/asm/cpufeature.h | 6 ++---- arch/arm64/kernel/cpufeature.c | 25 ++++++++++++++++++++++++- 3 files changed, 29 insertions(+), 6 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 865e0253fc1e..72e4e0580ddb 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -58,7 +58,9 @@ #define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 #define ARM64_HAS_E0PD 49 #define ARM64_HAS_RNG 50 +#define ARM64_HAS_ADDRESS_AUTH 51 +#define ARM64_HAS_GENERIC_AUTH 52 -#define ARM64_NCAPS 51 +#define ARM64_NCAPS 53 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a..8c8048372c8e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -590,15 +590,13 @@ static inline bool system_supports_cnp(void) static inline bool system_supports_address_auth(void) { return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && - (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || - cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF)); + cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH); } static inline bool system_supports_generic_auth(void) { return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) && - (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || - cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); + cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH); } static inline bool system_uses_irq_prio_masking(void) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4f2e95e6ecd1..01f50f043831 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1323,6 +1323,20 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); } + +static bool has_address_auth(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || + __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF); +} + +static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || + __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF); +} #endif /* CONFIG_ARM64_PTR_AUTH */ #ifdef CONFIG_ARM64_E0PD @@ -1600,7 +1614,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR1_APA_SHIFT, .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, .matches = has_cpuid_feature, - .cpu_enable = cpu_enable_address_auth, }, { .desc = "Address authentication (IMP DEF algorithm)", @@ -1611,6 +1624,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR1_API_SHIFT, .min_field_value = 
ID_AA64ISAR1_API_IMP_DEF, .matches = has_cpuid_feature, + }, + { + .capability = ARM64_HAS_ADDRESS_AUTH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_address_auth, .cpu_enable = cpu_enable_address_auth, }, { @@ -1633,6 +1651,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF, .matches = has_cpuid_feature, }, + { + .capability = ARM64_HAS_GENERIC_AUTH, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_generic_auth, + }, #endif /* CONFIG_ARM64_PTR_AUTH */ #ifdef CONFIG_ARM64_PSEUDO_NMI { -- cgit v1.2.3 From 91a1b6ccff323e60615e3118eceb2d8cbc4f69ab Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:50 +0530 Subject: arm64: rename ptrauth key structures to be user-specific We currently enable ptrauth for userspace, but do not use it within the kernel. We're going to enable it for the kernel, and will need to manage a separate set of ptrauth keys for the kernel. We currently keep all 5 keys in struct ptrauth_keys. However, as the kernel will only need to use 1 key, it is a bit wasteful to allocate a whole ptrauth_keys struct for every thread. Therefore, a subsequent patch will define a separate struct, with only 1 key, for the kernel. In preparation for that, rename the existing struct (and associated macros and functions) to reflect that they are specific to userspace. Acked-by: Catalin Marinas Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Re-positioned the patch to reduce the diff] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pointer_auth.h | 12 ++++++------ arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/pointer_auth.c | 8 ++++---- arch/arm64/kernel/ptrace.c | 16 ++++++++-------- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 7a24bad1a58b..799b079e69a5 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -22,7 +22,7 @@ struct ptrauth_key { * We give each process its own keys, which are shared by all threads. The keys * are inherited upon fork(), and reinitialised upon exec*(). 
*/ -struct ptrauth_keys { +struct ptrauth_keys_user { struct ptrauth_key apia; struct ptrauth_key apib; struct ptrauth_key apda; @@ -30,7 +30,7 @@ struct ptrauth_keys { struct ptrauth_key apga; }; -static inline void ptrauth_keys_init(struct ptrauth_keys *keys) +static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { get_random_bytes(&keys->apia, sizeof(keys->apia)); @@ -50,7 +50,7 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_switch(struct ptrauth_keys *keys) +static inline void ptrauth_keys_switch_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { __ptrauth_key_install(APIA, keys->apia); @@ -80,12 +80,12 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) #define ptrauth_thread_init_user(tsk) \ do { \ struct task_struct *__ptiu_tsk = (tsk); \ - ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \ - ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \ + ptrauth_keys_init_user(&__ptiu_tsk->thread.keys_user); \ + ptrauth_keys_switch_user(&__ptiu_tsk->thread.keys_user); \ } while (0) #define ptrauth_thread_switch(tsk) \ - ptrauth_keys_switch(&(tsk)->thread.keys_user) + ptrauth_keys_switch_user(&(tsk)->thread.keys_user) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5ba63204d078..496a92873290 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -146,7 +146,7 @@ struct thread_struct { unsigned long fault_code; /* ESR_EL1 value */ struct debug_info debug; /* debugging */ #ifdef CONFIG_ARM64_PTR_AUTH - struct ptrauth_keys keys_user; + struct ptrauth_keys_user keys_user; #endif }; diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c index c507b584259d..af5a638207f8 100644 --- a/arch/arm64/kernel/pointer_auth.c +++ b/arch/arm64/kernel/pointer_auth.c @@ -9,7 +9,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) { - struct ptrauth_keys *keys = &tsk->thread.keys_user; + struct ptrauth_keys_user *keys = &tsk->thread.keys_user; unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY; unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY; @@ -18,8 +18,8 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) return -EINVAL; if (!arg) { - ptrauth_keys_init(keys); - ptrauth_keys_switch(keys); + ptrauth_keys_init_user(keys); + ptrauth_keys_switch_user(keys); return 0; } @@ -41,7 +41,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (arg & PR_PAC_APGAKEY) get_random_bytes(&keys->apga, sizeof(keys->apga)); - ptrauth_keys_switch(keys); + ptrauth_keys_switch_user(keys); return 0; } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index cd6e5fa48b9c..b3d3005d9515 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -999,7 +999,7 @@ static struct ptrauth_key pac_key_from_user(__uint128_t ukey) } static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, - const struct ptrauth_keys *keys) + const struct ptrauth_keys_user *keys) { ukeys->apiakey = pac_key_to_user(&keys->apia); ukeys->apibkey = pac_key_to_user(&keys->apib); @@ -1007,7 +1007,7 @@ static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, ukeys->apdbkey = pac_key_to_user(&keys->apdb); } -static void 
pac_address_keys_from_user(struct ptrauth_keys *keys, +static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, const struct user_pac_address_keys *ukeys) { keys->apia = pac_key_from_user(ukeys->apiakey); @@ -1021,7 +1021,7 @@ static int pac_address_keys_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_address_keys user_keys; if (!system_supports_address_auth()) @@ -1038,7 +1038,7 @@ static int pac_address_keys_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_address_keys user_keys; int ret; @@ -1056,12 +1056,12 @@ static int pac_address_keys_set(struct task_struct *target, } static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, - const struct ptrauth_keys *keys) + const struct ptrauth_keys_user *keys) { ukeys->apgakey = pac_key_to_user(&keys->apga); } -static void pac_generic_keys_from_user(struct ptrauth_keys *keys, +static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, const struct user_pac_generic_keys *ukeys) { keys->apga = pac_key_from_user(ukeys->apgakey); @@ -1072,7 +1072,7 @@ static int pac_generic_keys_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_generic_keys user_keys; if (!system_supports_generic_auth()) @@ -1089,7 +1089,7 @@ static int pac_generic_keys_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct ptrauth_keys *keys = &target->thread.keys_user; + struct ptrauth_keys_user *keys = &target->thread.keys_user; struct user_pac_generic_keys user_keys; int ret; -- cgit v1.2.3 From be129842566599f2c6f8fbba277c098802cd4b3d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:51 +0530 Subject: arm64: install user ptrauth keys at kernel exit time As we're going to enable pointer auth within the kernel and use a different APIAKey for the kernel itself, so move the user APIAKey switch to EL0 exception return. The other 4 keys could remain switched during task switch, but are also moved to keep things consistent. 
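For reference, the new assembly macro is functionally equivalent to the C helper it replaces (ptrauth_keys_switch_user(), removed in the hunk below); the switch has to move into assembly on the exception return path because, once the kernel signs return addresses with its own APIAKey, no C function can run after the user keys are installed:

	static inline void ptrauth_keys_switch_user(struct ptrauth_keys_user *keys)
	{
		if (system_supports_address_auth()) {
			__ptrauth_key_install(APIA, keys->apia);
			__ptrauth_key_install(APIB, keys->apib);
			__ptrauth_key_install(APDA, keys->apda);
			__ptrauth_key_install(APDB, keys->apdb);
		}

		if (system_supports_generic_auth())
			__ptrauth_key_install(APGA, keys->apga);
	}
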
Reviewed-by: Kees Cook Reviewed-by: James Morse Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: commit msg, re-positioned the patch, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm_pointer_auth.h | 49 +++++++++++++++++++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 23 +-------------- arch/arm64/kernel/asm-offsets.c | 11 +++++++ arch/arm64/kernel/entry.S | 3 ++ arch/arm64/kernel/pointer_auth.c | 3 -- arch/arm64/kernel/process.c | 1 - 6 files changed, 64 insertions(+), 26 deletions(-) create mode 100644 arch/arm64/include/asm/asm_pointer_auth.h (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h new file mode 100644 index 000000000000..3482348ec07f --- /dev/null +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ASM_POINTER_AUTH_H +#define __ASM_ASM_POINTER_AUTH_H + +#include +#include +#include +#include + +#ifdef CONFIG_ARM64_PTR_AUTH +/* + * thread.keys_user.ap* as offset exceeds the #imm offset range + * so use the base value of ldp as thread.keys_user and offset as + * thread.keys_user.ap*. + */ + .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + mov \tmp1, #THREAD_KEYS_USER + add \tmp1, \tsk, \tmp1 +alternative_if_not ARM64_HAS_ADDRESS_AUTH + b .Laddr_auth_skip_\@ +alternative_else_nop_endif + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA] + msr_s SYS_APIAKEYLO_EL1, \tmp2 + msr_s SYS_APIAKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB] + msr_s SYS_APIBKEYLO_EL1, \tmp2 + msr_s SYS_APIBKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA] + msr_s SYS_APDAKEYLO_EL1, \tmp2 + msr_s SYS_APDAKEYHI_EL1, \tmp3 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB] + msr_s SYS_APDBKEYLO_EL1, \tmp2 + msr_s SYS_APDBKEYHI_EL1, \tmp3 +.Laddr_auth_skip_\@: +alternative_if ARM64_HAS_GENERIC_AUTH + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA] + msr_s SYS_APGAKEYLO_EL1, \tmp2 + msr_s SYS_APGAKEYHI_EL1, \tmp3 +alternative_else_nop_endif + .endm + +#else /* CONFIG_ARM64_PTR_AUTH */ + + .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + .endm + +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#endif /* __ASM_ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 799b079e69a5..dabe026ca8ca 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -50,19 +50,6 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_switch_user(struct ptrauth_keys_user *keys) -{ - if (system_supports_address_auth()) { - __ptrauth_key_install(APIA, keys->apia); - __ptrauth_key_install(APIB, keys->apib); - __ptrauth_key_install(APDA, keys->apda); - __ptrauth_key_install(APDB, keys->apdb); - } - - if (system_supports_generic_auth()) - __ptrauth_key_install(APGA, keys->apga); -} - extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -78,20 +65,12 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) } #define ptrauth_thread_init_user(tsk) \ -do { \ - struct task_struct *__ptiu_tsk = (tsk); \ - ptrauth_keys_init_user(&__ptiu_tsk->thread.keys_user); \ - ptrauth_keys_switch_user(&__ptiu_tsk->thread.keys_user); \ -} while (0) - -#define ptrauth_thread_switch(tsk) \ - ptrauth_keys_switch_user(&(tsk)->thread.keys_user) + 
ptrauth_keys_init_user(&(tsk)->thread.keys_user) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) -#define ptrauth_thread_switch(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index a5bdce8af65b..7b1ea2aece58 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -40,6 +40,9 @@ int main(void) #endif BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); +#endif BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); @@ -127,6 +130,14 @@ int main(void) #ifdef CONFIG_ARM_SDE_INTERFACE DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs)); DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority)); +#endif +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia)); + DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib)); + DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda)); + DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb)); + DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga)); + BLANK(); #endif return 0; } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9461d812ae27..684e475bfda0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -341,6 +342,8 @@ alternative_else_nop_endif msr cntkctl_el1, x1 4: #endif + ptrauth_keys_install_user tsk, x0, x1, x2 + apply_ssbd 0, x0, x1 .endif diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c index af5a638207f8..1e77736a4f66 100644 --- a/arch/arm64/kernel/pointer_auth.c +++ b/arch/arm64/kernel/pointer_auth.c @@ -19,7 +19,6 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (!arg) { ptrauth_keys_init_user(keys); - ptrauth_keys_switch_user(keys); return 0; } @@ -41,7 +40,5 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg) if (arg & PR_PAC_APGAKEY) get_random_bytes(&keys->apga, sizeof(keys->apga)); - ptrauth_keys_switch_user(keys); - return 0; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 00626057a384..6140e791bf92 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -512,7 +512,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); uao_thread_switch(next); - ptrauth_thread_switch(next); ssbs_thread_switch(next); /* -- cgit v1.2.3 From df3551011b8188e7a9291a66c2c0a04c4eb9d8eb Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:52 +0530 Subject: arm64: ptrauth: Add bootup/runtime flags for __cpu_setup This patch allows __cpu_setup to be invoked with one of these flags, ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME. This is required as some cpufeatures need different handling during different scenarios. The input parameter in x0 is preserved till the end to be used inside this function. 
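For reference, the three option values and the entry points that pass them in x0 (defined in asm/smp.h and wired up in the hunks below):

	/* Possible options for __cpu_setup, passed in x0 */
	#define ARM64_CPU_BOOT_PRIMARY   (1)	/* stext: cold boot of the boot CPU        */
	#define ARM64_CPU_BOOT_SECONDARY (2)	/* secondary_startup: secondary CPU bring-up */
	#define ARM64_CPU_RUNTIME        (3)	/* cpu_resume: runtime, e.g. resume from a low-power state */
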
There should be no functional change with this patch and is useful for the subsequent ptrauth patch which utilizes it. Some upcoming arm cpufeatures can also utilize these flags. Suggested-by: James Morse Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Reviewed-by: James Morse Reviewed-by: Suzuki K Poulose Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/smp.h | 8 ++++++++ arch/arm64/kernel/head.S | 2 ++ arch/arm64/kernel/sleep.S | 2 ++ arch/arm64/mm/proc.S | 26 +++++++++++++++----------- 4 files changed, 27 insertions(+), 11 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index a0c8a0b65259..8d66497d8157 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -23,6 +23,14 @@ #define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT) #define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT) +/* Possible options for __cpu_setup */ +/* Option to setup primary cpu */ +#define ARM64_CPU_BOOT_PRIMARY (1) +/* Option to setup secondary cpus */ +#define ARM64_CPU_BOOT_SECONDARY (2) +/* Option to setup cpus for different cpu run time services */ +#define ARM64_CPU_RUNTIME (3) + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..797573fe0e9c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -118,6 +118,7 @@ ENTRY(stext) * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ + mov x0, #ARM64_CPU_BOOT_PRIMARY bl __cpu_setup // initialise processor b __primary_switch ENDPROC(stext) @@ -712,6 +713,7 @@ secondary_startup: * Common entry point for secondary CPUs. */ bl __cpu_secondary_check52bitva + mov x0, #ARM64_CPU_BOOT_SECONDARY bl __cpu_setup // initialise processor adrp x1, swapper_pg_dir bl __enable_mmu diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index f5b04dd8a710..7b2f2e650c44 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -3,6 +3,7 @@ #include #include #include +#include .text /* @@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly + mov x0, #ARM64_CPU_RUNTIME bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ adrp x1, swapper_pg_dir diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index aafed6902411..ea0db1744c29 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -408,30 +408,30 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings) /* * __cpu_setup * - * Initialise the processor for turning the MMU on. Return in x0 the - * value of the SCTLR_EL1 register. + * Initialise the processor for turning the MMU on. + * + * Input: + * x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME. + * Output: + * Return in x0 the value of the SCTLR_EL1 register. 
*/ .pushsection ".idmap.text", "awx" SYM_FUNC_START(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh - mov x0, #3 << 20 - msr cpacr_el1, x0 // Enable FP/ASIMD - mov x0, #1 << 12 // Reset mdscr_el1 and disable - msr mdscr_el1, x0 // access to the DCC from EL0 + mov x1, #3 << 20 + msr cpacr_el1, x1 // Enable FP/ASIMD + mov x1, #1 << 12 // Reset mdscr_el1 and disable + msr mdscr_el1, x1 // access to the DCC from EL0 isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu - reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_pmuserenr_el0 x1 // Disable PMU access from EL0 /* * Memory region attributes */ mov_q x5, MAIR_EL1_SET msr mair_el1, x5 - /* - * Prepare SCTLR - */ - mov_q x0, SCTLR_EL1_SET /* * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * both user and kernel. @@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup) 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 + /* + * Prepare SCTLR + */ + mov_q x0, SCTLR_EL1_SET ret // return to head.S SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From 8c176e1625a66d35362d4eac7ceab55c1229b481 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:53 +0530 Subject: arm64: cpufeature: Move cpu capability helpers inside C file These helpers are used only by functions inside cpufeature.c and hence makes sense to be moved from cpufeature.h to cpufeature.c as they are not expected to be used globally. This change helps in reducing the header file size as well as to add future cpu capability types without confusion. Only a cpu capability type macro is sufficient to expose those capabilities globally. Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 12 ------------ arch/arm64/kernel/cpufeature.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8c8048372c8e..cea3c1cdf252 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -340,18 +340,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) return cap->type & ARM64_CPUCAP_SCOPE_MASK; } -static inline bool -cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) -{ - return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); -} - -static inline bool -cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) -{ - return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); -} - /* * Generic helper for handling capabilties with multiple (match,enable) pairs * of call backs, sharing the same capability bit. 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 01f50f043831..04ecf1cc3306 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1363,6 +1363,19 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, } #endif +/* Internal helper functions to match cpu capability type */ +static bool +cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); +} + +static bool +cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", -- cgit v1.2.3 From deeaac5175a577cbbe1a2319903781d0a7ef7720 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:54 +0530 Subject: arm64: cpufeature: handle conflicts based on capability Each system capability can be of either boot, local, or system scope, depending on when the state of the capability is finalized. When we detect a conflict on a late CPU, we either offline the CPU or panic the system. We currently always panic if the conflict is caused by a boot scope capability, and offline the CPU if the conflict is caused by a local or system scope capability. We're going to want to add a new capability (for pointer authentication) which needs to be boot scope but doesn't need to panic the system when a conflict is detected. So add a new flag to specify whether the capability requires the system to panic or not. Current boot scope capabilities are updated to set the flag, so there should be no functional change as a result of this patch. Signed-off-by: Amit Daniel Kachhap Signed-off-by: Kristina Martsenko Reviewed-by: Vincenzo Frascino Reviewed-by: Suzuki K Poulose Reviewed-by: Kees Cook Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 12 ++++++++++-- arch/arm64/kernel/cpufeature.c | 29 +++++++++++++++-------------- 2 files changed, 25 insertions(+), 16 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index cea3c1cdf252..46388e65bbcd 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; * In some non-typical cases either both (a) and (b), or neither, * should be permitted. This can be described by including neither * or both flags in the capability's type field. + * + * In case of a conflict, the CPU is prevented from booting. If the + * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability, + * then a kernel panic is triggered. */ @@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4)) /* Is it safe for a late CPU to miss this capability when system has it */ #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5)) +/* Panic when a conflict is detected */ +#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6)) /* * CPU errata workarounds that need to be enabled at boot time if one or @@ -279,9 +285,11 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; /* * CPU feature used early in the boot based on the boot CPU. All secondary - * CPUs must match the state of the capability as detected by the boot CPU. + * CPUs must match the state of the capability as detected by the boot CPU. 
In + * case of a conflict, a kernel panic is triggered. */ -#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU +#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \ + (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT) struct arm64_cpu_capabilities { const char *desc; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04ecf1cc3306..d6033f45c3cd 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1376,6 +1376,12 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); } +static bool +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) +{ + return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -2018,10 +2024,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask) * Run through the list of capabilities to check for conflicts. * If the system has already detected a capability, take necessary * action on this CPU. - * - * Returns "false" on conflicts. */ -static bool verify_local_cpu_caps(u16 scope_mask) +static void verify_local_cpu_caps(u16 scope_mask) { int i; bool cpu_has_cap, system_has_cap; @@ -2066,10 +2070,12 @@ static bool verify_local_cpu_caps(u16 scope_mask) pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", smp_processor_id(), caps->capability, caps->desc, system_has_cap, cpu_has_cap); - return false; - } - return true; + if (cpucap_panic_on_conflict(caps)) + cpu_panic_kernel(); + else + cpu_die_early(); + } } /* @@ -2079,12 +2085,8 @@ static bool verify_local_cpu_caps(u16 scope_mask) static void check_early_cpu_features(void) { verify_cpu_asid_bits(); - /* - * Early features are used by the kernel already. If there - * is a conflict, we cannot proceed further. - */ - if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) - cpu_panic_kernel(); + + verify_local_cpu_caps(SCOPE_BOOT_CPU); } static void @@ -2132,8 +2134,7 @@ static void verify_local_cpu_capabilities(void) * check_early_cpu_features(), as they need to be verified * on all secondary CPUs. */ - if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) - cpu_die_early(); + verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); verify_local_elf_hwcaps(arm64_elf_hwcaps); -- cgit v1.2.3 From 6982934e19f8ebb4152ba77308facdb1a38533f9 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:55 +0530 Subject: arm64: enable ptrauth earlier When the kernel is compiled with pointer auth instructions, the boot CPU needs to start using address auth very early, so change the cpucap to account for this. Pointer auth must be enabled before we call C functions, because it is not possible to enter a function with pointer auth disabled and exit it with pointer auth enabled. Note, mismatches between architected and IMPDEF algorithms will still be caught by the cpufeature framework (the separate *_ARCH and *_IMP_DEF cpucaps). Note the change in behavior: if the boot CPU has address auth and a late CPU does not, then the late CPU is parked by the cpufeature framework. This is possible as kernel will only have NOP space intructions for PAC so such mismatched late cpu will silently ignore those instructions in C functions. Also, if the boot CPU does not have address auth and the late CPU has then the late cpu will still boot but with ptrauth feature disabled. 
Leave generic authentication as a "system scope" cpucap for now, since initially the kernel will only use address authentication. Reviewed-by: Kees Cook Reviewed-by: Suzuki K Poulose Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Re-worked ptrauth setup logic, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 6 ++++++ arch/arm64/include/asm/cpufeature.h | 9 +++++++++ arch/arm64/kernel/cpufeature.c | 13 +++---------- arch/arm64/mm/proc.S | 31 +++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 10 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b30e884e088..87e2cbb76930 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1515,6 +1515,12 @@ config ARM64_PTR_AUTH be enabled. However, KVM guest also require VHE mode and hence CONFIG_ARM64_VHE=y option to use this feature. + If the feature is present on the boot CPU but not on a late CPU, then + the late CPU will be parked. Also, if the boot CPU does not have + address auth and the late CPU has then the late CPU will still boot + but with the feature disabled. On such a system, this option should + not be selected. + endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 46388e65bbcd..2b5a088053e4 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -291,6 +291,15 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \ (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT) +/* + * CPU feature used early in the boot based on the boot CPU. It is safe for a + * late CPU to have this feature even though the boot CPU hasn't enabled it, + * although the feature will not be used by Linux in this case. If the boot CPU + * has enabled this feature already, then every late CPU must have it. 
+ */ +#define ARM64_CPUCAP_BOOT_CPU_FEATURE \ + (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) + struct arm64_cpu_capabilities { const char *desc; u16 capability; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d6033f45c3cd..f6c0cb755107 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1318,12 +1318,6 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) #endif /* CONFIG_ARM64_RAS_EXTN */ #ifdef CONFIG_ARM64_PTR_AUTH -static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) -{ - sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | - SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); -} - static bool has_address_auth(const struct arm64_cpu_capabilities *entry, int __unused) { @@ -1627,7 +1621,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Address authentication (architected algorithm)", .capability = ARM64_HAS_ADDRESS_AUTH_ARCH, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .sys_reg = SYS_ID_AA64ISAR1_EL1, .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_APA_SHIFT, @@ -1637,7 +1631,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "Address authentication (IMP DEF algorithm)", .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .sys_reg = SYS_ID_AA64ISAR1_EL1, .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_API_SHIFT, @@ -1646,9 +1640,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }, { .capability = ARM64_HAS_ADDRESS_AUTH, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, .matches = has_address_auth, - .cpu_enable = cpu_enable_address_auth, }, { .desc = "Generic authentication (architected algorithm)", diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index ea0db1744c29..4cf19a26af2d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K @@ -468,9 +469,39 @@ SYM_FUNC_START(__cpu_setup) 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 + mov x1, x0 /* * Prepare SCTLR */ mov_q x0, SCTLR_EL1_SET + +#ifdef CONFIG_ARM64_PTR_AUTH + /* No ptrauth setup for run time cpus */ + cmp x1, #ARM64_CPU_RUNTIME + b.eq 3f + + /* Check if the CPU supports ptrauth */ + mrs x2, id_aa64isar1_el1 + ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8 + cbz x2, 3f + + msr_s SYS_APIAKEYLO_EL1, xzr + msr_s SYS_APIAKEYHI_EL1, xzr + + /* Just enable ptrauth for primary cpu */ + cmp x1, #ARM64_CPU_BOOT_PRIMARY + b.eq 2f + + /* if !system_supports_address_auth() then skip enable */ +alternative_if_not ARM64_HAS_ADDRESS_AUTH + b 3f +alternative_else_nop_endif + +2: /* Enable ptrauth instructions */ + ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB + orr x0, x0, x2 +3: +#endif ret // return to head.S SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From 33e45234987ea3ed4b05fc512f4441696478f12d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 13 Mar 2020 14:34:56 +0530 Subject: arm64: initialize and switch ptrauth kernel keys Set up keys to use pointer authentication within the kernel. The kernel will be compiled with APIAKey instructions, the other keys are currently unused. Each task is given its own APIAKey, which is initialized during fork. 
The key is changed during context switch and on kernel entry from EL0. The keys for idle threads need to be set before calling any C functions, because it is not possible to enter and exit a function with different keys. Reviewed-by: Kees Cook Reviewed-by: Catalin Marinas Reviewed-by: Vincenzo Frascino Signed-off-by: Kristina Martsenko [Amit: Modified secondary cores key structure, comments] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm_pointer_auth.h | 14 ++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 13 +++++++++++++ arch/arm64/include/asm/processor.h | 1 + arch/arm64/include/asm/smp.h | 4 ++++ arch/arm64/kernel/asm-offsets.c | 5 +++++ arch/arm64/kernel/entry.S | 3 +++ arch/arm64/kernel/process.c | 2 ++ arch/arm64/kernel/smp.c | 8 ++++++++ arch/arm64/mm/proc.S | 12 ++++++++++++ 9 files changed, 62 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index 3482348ec07f..d3f4aee42851 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -39,11 +39,25 @@ alternative_if ARM64_HAS_GENERIC_AUTH alternative_else_nop_endif .endm + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 +alternative_if ARM64_HAS_ADDRESS_AUTH + mov \tmp1, #THREAD_KEYS_KERNEL + add \tmp1, \tsk, \tmp1 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA] + msr_s SYS_APIAKEYLO_EL1, \tmp2 + msr_s SYS_APIAKEYHI_EL1, \tmp3 + isb +alternative_else_nop_endif + .endm + #else /* CONFIG_ARM64_PTR_AUTH */ .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 .endm + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 + .endm + #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index dabe026ca8ca..aa956ca5f2c2 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -30,6 +30,10 @@ struct ptrauth_keys_user { struct ptrauth_key apga; }; +struct ptrauth_keys_kernel { + struct ptrauth_key apia; +}; + static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { @@ -50,6 +54,12 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) +static inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) +{ + if (system_supports_address_auth()) + get_random_bytes(&keys->apia, sizeof(keys->apia)); +} + extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -66,11 +76,14 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) #define ptrauth_thread_init_user(tsk) \ ptrauth_keys_init_user(&(tsk)->thread.keys_user) +#define ptrauth_thread_init_kernel(tsk) \ + ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) +#define ptrauth_thread_init_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 496a92873290..4c77da5dc819 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -147,6 +147,7 @@ struct thread_struct { struct debug_info debug; /* debugging */ #ifdef CONFIG_ARM64_PTR_AUTH struct ptrauth_keys_user keys_user; + struct ptrauth_keys_kernel 
keys_kernel; #endif }; diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 8d66497d8157..40d5ba029615 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -38,6 +38,7 @@ #include #include #include +#include DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); @@ -95,6 +96,9 @@ asmlinkage void secondary_start_kernel(void); struct secondary_data { void *stack; struct task_struct *task; +#ifdef CONFIG_ARM64_PTR_AUTH + struct ptrauth_keys_kernel ptrauth_key; +#endif long status; }; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7b1ea2aece58..9981a0a5a87f 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -42,6 +42,7 @@ int main(void) DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); #ifdef CONFIG_ARM64_PTR_AUTH DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user)); + DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel)); #endif BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); @@ -91,6 +92,9 @@ int main(void) BLANK(); DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); +#ifdef CONFIG_ARM64_PTR_AUTH + DEFINE(CPU_BOOT_PTRAUTH_KEY, offsetof(struct secondary_data, ptrauth_key)); +#endif BLANK(); #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); @@ -137,6 +141,7 @@ int main(void) DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda)); DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb)); DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga)); + DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); BLANK(); #endif return 0; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 684e475bfda0..3dad2d000e3c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -178,6 +178,7 @@ alternative_cb_end apply_ssbd 1, x22, x23 + ptrauth_keys_install_kernel tsk, x20, x22, x23 .else add x21, sp, #S_FRAME_SIZE get_current_task tsk @@ -342,6 +343,7 @@ alternative_else_nop_endif msr cntkctl_el1, x1 4: #endif + /* No kernel C function calls after this as user keys are set. 
*/ ptrauth_keys_install_user tsk, x0, x1, x2 apply_ssbd 0, x0, x1 @@ -898,6 +900,7 @@ ENTRY(cpu_switch_to) ldr lr, [x8] mov sp, x9 msr sp_el0, x1 + ptrauth_keys_install_kernel x1, x8, x9, x10 ret ENDPROC(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6140e791bf92..7db0302bec00 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -376,6 +376,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, */ fpsimd_flush_task_state(p); + ptrauth_thread_init_kernel(p); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe..08903413f106 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -112,6 +112,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) */ secondary_data.task = idle; secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; +#if defined(CONFIG_ARM64_PTR_AUTH) + secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo; + secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi; +#endif update_cpu_boot_status(CPU_MMU_OFF); __flush_dcache_area(&secondary_data, sizeof(secondary_data)); @@ -138,6 +142,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.task = NULL; secondary_data.stack = NULL; +#if defined(CONFIG_ARM64_PTR_AUTH) + secondary_data.ptrauth_key.apia.lo = 0; + secondary_data.ptrauth_key.apia.hi = 0; +#endif __flush_dcache_area(&secondary_data, sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); if (ret && status) { diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 4cf19a26af2d..5a11a895e923 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -485,6 +485,10 @@ SYM_FUNC_START(__cpu_setup) ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8 cbz x2, 3f + /* + * The primary cpu keys are reset here and can be + * re-initialised with some proper values later. + */ msr_s SYS_APIAKEYLO_EL1, xzr msr_s SYS_APIAKEYHI_EL1, xzr @@ -497,6 +501,14 @@ alternative_if_not ARM64_HAS_ADDRESS_AUTH b 3f alternative_else_nop_endif + /* Install ptrauth key for secondary cpus */ + adr_l x2, secondary_data + ldr x3, [x2, #CPU_BOOT_TASK] // get secondary_data.task + cbz x3, 2f // check for slow booting cpus + ldp x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY] + msr_s SYS_APIAKEYLO_EL1, x3 + msr_s SYS_APIAKEYHI_EL1, x4 + 2: /* Enable ptrauth instructions */ ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB -- cgit v1.2.3 From 28321582334c261c13b20d7efe634e610b4c100b Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:57 +0530 Subject: arm64: initialize ptrauth keys for kernel booting task This patch uses the existing boot_init_stack_canary arch function to initialize the ptrauth keys for the booting task in the primary core. The requirement here is that it should be always inline and the caller must never return. As pointer authentication too detects a subset of stack corruption so it makes sense to place this code here. Both pointer authentication and stack canary codes are protected by their respective config option. 
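After this change the helper, which must stay __always_inline and whose caller never returns, ends up roughly as follows, with the two features independently configurable:

	static __always_inline void boot_init_stack_canary(void)
	{
	#if defined(CONFIG_STACKPROTECTOR)
		/* existing stack canary initialisation ... */
	#endif
		/* Generate and install the kernel APIA key for the init task. */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
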
Suggested-by: Ard Biesheuvel Signed-off-by: Amit Daniel Kachhap Reviewed-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pointer_auth.h | 11 ++++++++++- arch/arm64/include/asm/stackprotector.h | 5 +++++ include/linux/stackprotector.h | 2 +- 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index aa956ca5f2c2..833d3f948de0 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -54,12 +54,18 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) -static inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) +static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) { if (system_supports_address_auth()) get_random_bytes(&keys->apia, sizeof(keys->apia)); } +static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys) +{ + if (system_supports_address_auth()) + __ptrauth_key_install(APIA, keys->apia); +} + extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); /* @@ -78,12 +84,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) ptrauth_keys_init_user(&(tsk)->thread.keys_user) #define ptrauth_thread_init_kernel(tsk) \ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) +#define ptrauth_thread_switch_kernel(tsk) \ + ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel) #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_thread_init_user(tsk) #define ptrauth_thread_init_kernel(tsk) +#define ptrauth_thread_switch_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index 5884a2b02827..7263e0bac680 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -15,6 +15,7 @@ #include #include +#include extern unsigned long __stack_chk_guard; @@ -26,6 +27,7 @@ extern unsigned long __stack_chk_guard; */ static __always_inline void boot_init_stack_canary(void) { +#if defined(CONFIG_STACKPROTECTOR) unsigned long canary; /* Try to get a semi random initial value. */ @@ -36,6 +38,9 @@ static __always_inline void boot_init_stack_canary(void) current->stack_canary = canary; if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) __stack_chk_guard = current->stack_canary; +#endif + ptrauth_thread_init_kernel(current); + ptrauth_thread_switch_kernel(current); } #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h index 6b792d080eee..4c678c4fec58 100644 --- a/include/linux/stackprotector.h +++ b/include/linux/stackprotector.h @@ -6,7 +6,7 @@ #include #include -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH) # include #else static inline void boot_init_stack_canary(void) -- cgit v1.2.3 From 689eae42afd7a916634146edca38463769969184 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:34:58 +0530 Subject: arm64: mask PAC bits of __builtin_return_address Functions like vmap() record how much memory has been allocated by their callers, and callers are identified using __builtin_return_address(). Once the kernel is using pointer-auth the return address will be signed. 
This means it will not match any kernel symbol, and will vary between threads even for the same caller. The output of /proc/vmallocinfo in this case may look like, 0x(____ptrval____)-0x(____ptrval____) 20480 0x86e28000100e7c60 pages=4 vmalloc N0=4 0x(____ptrval____)-0x(____ptrval____) 20480 0x86e28000100e7c60 pages=4 vmalloc N0=4 0x(____ptrval____)-0x(____ptrval____) 20480 0xc5c78000100e7c60 pages=4 vmalloc N0=4 The above three 64bit values should be the same symbol name and not different LR values. Use the pre-processor to add logic to clear the PAC to __builtin_return_address() callers. This patch adds a new file asm/compiler.h and is transitively included via include/compiler_types.h on the compiler command line so it is guaranteed to be loaded and the users of this macro will not find a wrong version. Helper macros ptrauth_kernel_pac_mask/ptrauth_clear_pac are created for this purpose and added in this file. Existing macro ptrauth_user_pac_mask moved from asm/pointer_auth.h. Signed-off-by: Amit Daniel Kachhap Reviewed-by: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/compiler.h | 24 ++++++++++++++++++++++++ arch/arm64/include/asm/pointer_auth.h | 9 +-------- 3 files changed, 26 insertions(+), 8 deletions(-) create mode 100644 arch/arm64/include/asm/compiler.h (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 87e2cbb76930..115ceea0293e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -118,6 +118,7 @@ config ARM64 select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE + select HAVE_ARCH_COMPILER_H select HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h new file mode 100644 index 000000000000..eece20d2c55f --- /dev/null +++ b/arch/arm64/include/asm/compiler.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_COMPILER_H +#define __ASM_COMPILER_H + +#if defined(CONFIG_ARM64_PTR_AUTH) + +/* + * The EL0/EL1 pointer bits used by a pointer authentication code. + * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply. + */ +#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual) +#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual) + +/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */ +#define ptrauth_clear_pac(ptr) \ + ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \ + (ptr & ~ptrauth_user_pac_mask())) + +#define __builtin_return_address(val) \ + (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val))) + +#endif /* CONFIG_ARM64_PTR_AUTH */ + +#endif /* __ASM_COMPILER_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 833d3f948de0..70c47156e54b 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -68,16 +68,9 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); -/* - * The EL0 pointer bits used by a pointer authentication code. - * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. 
- */ -#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual) - -/* Only valid for EL0 TTBR0 instruction pointers */ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) { - return ptr & ~ptrauth_user_pac_mask(); + return ptrauth_clear_pac(ptr); } #define ptrauth_thread_init_user(tsk) \ -- cgit v1.2.3 From 04ad99a0b160450ae615e41b839e444eccb5c99b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2020 14:34:59 +0530 Subject: arm64: unwind: strip PAC from kernel addresses When we enable pointer authentication in the kernel, LR values saved to the stack will have a PAC which we must strip in order to retrieve the real return address. Strip PACs when unwinding the stack in order to account for this. When function graph tracer is used with patchable-function-entry then return_to_handler will also have pac bits so strip it too. Reviewed-by: Kees Cook Acked-by: Catalin Marinas Reviewed-by: James Morse Signed-off-by: Mark Rutland Signed-off-by: Kristina Martsenko [Amit: Re-position ptrauth_strip_insn_pac, comment] Signed-off-by: Amit Daniel Kachhap Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index a336cb124320..139679c745bf 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -86,7 +87,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && - (frame->pc == (unsigned long)return_to_handler)) { + (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) { struct ftrace_ret_stack *ret_stack; /* * This is a case where function graph tracer has @@ -101,6 +102,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + frame->pc = ptrauth_strip_insn_pac(frame->pc); + /* * Frames created upon entry from EL0 have NULL FP and PC values, so * don't bother reporting these. Frames created by __noreturn functions -- cgit v1.2.3 From cdcb61ae4c56f9edcd1eca4c2df444f3f5e96e1d Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 13 Mar 2020 14:35:00 +0530 Subject: arm64: __show_regs: strip PAC from lr in printk lr is printed with %pS which will try to find an entry in kallsyms. After enabling pointer authentication, this match will fail due to PAC present in the lr. Strip PAC from the lr to display the correct symbol name. 
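As an example of what the helper does here (assuming vabits_actual == 48): a signed kernel LR such as the 0x86e28000100e7c60 value shown in the vmallocinfo example above has bit 55 set, so ptrauth_clear_pac() fills the PAC bits with ones:

	unsigned long lr = 0x86e28000100e7c60UL;	/* LR carrying a PAC */
	lr = ptrauth_strip_insn_pac(lr);		/* now ptrauth_clear_pac(lr) */
	/* lr == 0xffff8000100e7c60, which %pS can resolve via kallsyms */
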
lr is printed with %pS, which tries to find a matching entry in kallsyms.
After pointer authentication is enabled, this lookup fails because of the
PAC present in lr. Strip the PAC from lr so that the correct symbol name is
displayed.

Suggested-by: James Morse
Signed-off-by: Amit Daniel Kachhap
Reviewed-by: Vincenzo Frascino
Acked-by: Catalin Marinas
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7db0302bec00..cacae291ba27 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -262,7 +262,7 @@ void __show_regs(struct pt_regs *regs)
 
 	if (!user_mode(regs)) {
 		printk("pc : %pS\n", (void *)regs->pc);
-		printk("lr : %pS\n", (void *)lr);
+		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
 	} else {
 		printk("pc : %016llx\n", regs->pc);
 		printk("lr : %016llx\n", lr);
-- cgit v1.2.3

From e51f5f56dd69e009e22af8a4354dce0817a7addb Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap
Date: Fri, 13 Mar 2020 14:35:01 +0530
Subject: arm64: suspend: restore the kernel ptrauth keys

This patch restores the kernel ptrauth keys from the current task during
cpu resume, after the MMU is turned on and ptrauth is enabled.

A flag is added to the ptrauth_keys_install_kernel macro to indicate
whether an isb instruction needs to be executed.

Signed-off-by: Amit Daniel Kachhap
Reviewed-by: Vincenzo Frascino
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/asm_pointer_auth.h | 6 ++++--
 arch/arm64/kernel/entry.S                 | 4 ++--
 arch/arm64/mm/proc.S                      | 2 ++
 3 files changed, 8 insertions(+), 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index d3f4aee42851..ce2a8486992b 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -39,14 +39,16 @@ alternative_if ARM64_HAS_GENERIC_AUTH
 alternative_else_nop_endif
 	.endm
 
-	.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+	.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
 alternative_if ARM64_HAS_ADDRESS_AUTH
 	mov	\tmp1, #THREAD_KEYS_KERNEL
 	add	\tmp1, \tsk, \tmp1
 	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
 	msr_s	SYS_APIAKEYLO_EL1, \tmp2
 	msr_s	SYS_APIAKEYHI_EL1, \tmp3
+	.if	\sync == 1
 	isb
+	.endif
 alternative_else_nop_endif
 	.endm
 
@@ -55,7 +57,7 @@ alternative_else_nop_endif
 	.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
 	.endm
 
-	.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+	.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
 	.endm
 
 #endif /* CONFIG_ARM64_PTR_AUTH */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3dad2d000e3c..6273d7bed962 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -178,7 +178,7 @@ alternative_cb_end
 
 	apply_ssbd 1, x22, x23
 
-	ptrauth_keys_install_kernel tsk, x20, x22, x23
+	ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_current_task tsk
@@ -900,7 +900,7 @@ ENTRY(cpu_switch_to)
 	ldr	lr, [x8]
 	mov	sp, x9
 	msr	sp_el0, x1
-	ptrauth_keys_install_kernel x1, x8, x9, x10
+	ptrauth_keys_install_kernel x1, 1, x8, x9, x10
 	ret
 ENDPROC(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5a11a895e923..4450dc83cf5c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -137,6 +138,7 @@ alternative_if ARM64_HAS_RAS_EXTN
 	msr_s	SYS_DISR_EL1, xzr
 alternative_else_nop_endif
 
+	ptrauth_keys_install_kernel x14, 0, x1, x2, x3
 	isb
 	ret
 SYM_FUNC_END(cpu_do_resume)
-- cgit v1.2.3
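The design choice above (pass sync=0 in cpu_do_resume(), since an isb
already follows, and sync=1 on the exception entry and context-switch
paths) only matters because the APIA key must be in place before any signed
kernel code runs again. The toy program below models that failure mode with
a fake PAC (a simple XOR folded into the top bits, nothing like the real
QARMA or IMP DEF computation, and with made-up key and address values): a
return address signed with one key does not authenticate under another,
which is what would happen after resume if the key were not restored from
the task's saved state.

#include <stdint.h>
#include <stdio.h>

#define PAC_MASK 0xffff000000000000ULL	/* top bits carrying the toy PAC */

static uint64_t toy_sign(uint64_t ptr, uint64_t key)
{
	/* Fake PAC: key mixed into the pointer's top bits (illustrative only) */
	return (ptr & ~PAC_MASK) | ((ptr ^ key) & PAC_MASK);
}

static int toy_auth(uint64_t signed_ptr, uint64_t key)
{
	uint64_t ptr = signed_ptr | PAC_MASK;	/* rebuild canonical kernel pointer */

	return toy_sign(ptr, key) == signed_ptr;	/* 1 = ok, 0 = fault */
}

int main(void)
{
	uint64_t lr = 0xffff8000100e7c60ULL;		/* made-up return address */
	uint64_t task_key = 0x1122334455667788ULL;	/* made-up APIA key */
	uint64_t stale_key = 0xdeadbeefcafef00dULL;	/* key left unrestored */

	uint64_t signed_lr = toy_sign(lr, task_key);

	printf("key restored    : %s\n",
	       toy_auth(signed_lr, task_key) ? "auth ok" : "auth fault");
	printf("key not restored: %s\n",
	       toy_auth(signed_lr, stale_key) ? "auth ok" : "auth fault");
	return 0;
}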
From 74afda4016a7437e6e425c3370e4b93b47be8ddf Mon Sep 17 00:00:00 2001
From: Kristina Martsenko
Date: Fri, 13 Mar 2020 14:35:03 +0530
Subject: arm64: compile the kernel with ptrauth return address signing

Compile all functions with two ptrauth instructions: PACIASP in the
prologue to sign the return address, and AUTIASP in the epilogue to
authenticate the return address (from the stack). If authentication fails,
the return will cause an instruction abort to be taken, followed by an oops
and killing the task.

This should help protect the kernel against attacks using return-oriented
programming. As ptrauth protects the return address, it can also serve as a
replacement for CONFIG_STACKPROTECTOR, although note that it does not
protect other parts of the stack.

The new instructions are in the HINT encoding space, so on a system without
ptrauth they execute as NOPs.

CONFIG_ARM64_PTR_AUTH now not only enables ptrauth for userspace and KVM
guests, but also automatically builds the kernel with ptrauth instructions
if the compiler supports it. If there is no compiler support, we do not
warn that the kernel was built without ptrauth instructions.

GCC 7 and 8 support the -msign-return-address option, while GCC 9
deprecates that option and replaces it with -mbranch-protection. Support
both options.

Clang uses an external assembler, hence this patch makes sure that the
correct parameters (-march=armv8.3-a) are passed down to help it recognize
the ptrauth instructions.

The ftrace function tracer works properly with ptrauth only when the
patchable-function-entry feature is present, which is ensured by the
Kconfig dependency.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Masahiro Yamada
Reviewed-by: Kees Cook
Reviewed-by: Vincenzo Frascino # not co-dev parts
Co-developed-by: Vincenzo Frascino
Signed-off-by: Vincenzo Frascino
Signed-off-by: Kristina Martsenko
[Amit: Cover leaf function, comments, Ftrace Kconfig]
Signed-off-by: Amit Daniel Kachhap
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig  | 24 +++++++++++++++++++++++-
 arch/arm64/Makefile | 11 +++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 115ceea0293e..155041a5f0e4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1499,6 +1499,8 @@ config ARM64_PTR_AUTH
 	bool "Enable support for pointer authentication"
 	default y
 	depends on !KVM || ARM64_VHE
+	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Pointer authentication (part of the ARMv8.3 Extensions) provides
 	  instructions for signing and authenticating pointers against secret
@@ -1506,11 +1508,17 @@
 	  and other attacks.
 
 	  This option enables these instructions at EL0 (i.e. for userspace).
-
 	  Choosing this option will cause the kernel to initialise secret keys
 	  for each process at exec() time, with these keys being
 	  context-switched along with the process.
 
+	  If the compiler supports the -mbranch-protection or
+	  -msign-return-address flag (e.g. GCC 7 or later), then this option
+	  will also cause the kernel itself to be compiled with return address
+	  protection. In this case, and if the target hardware is known to
+	  support pointer authentication, then CONFIG_STACKPROTECTOR can be
+	  disabled with minimal loss of protection.
+
 	  The feature is detected at runtime. If the feature is not present in
 	  hardware it will not be advertised to userspace/KVM guest nor will it
 	  be enabled. However, KVM guest also require VHE mode and hence
@@ -1522,6 +1530,20 @@ config ARM64_PTR_AUTH
 	  but with the feature disabled. On such a system, this option
 	  should not be selected.
 
+	  This feature works with FUNCTION_GRAPH_TRACER option only if
+	  DYNAMIC_FTRACE_WITH_REGS is enabled.
+
+config CC_HAS_BRANCH_PROT_PAC_RET
+	# GCC 9 or later, clang 8 or later
+	def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+config CC_HAS_SIGN_RETURN_ADDRESS
+	# GCC 7, 8
+	def_bool $(cc-option,-msign-return-address=all)
+
+config AS_HAS_PAC
+	def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+
 endmenu
 
 menu "ARMv8.5 architectural features"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index dca1a97751ab..f15f92ba53e6 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
 					include/generated/asm-offsets.h))
 endif
 
+ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
+# compiler to generate them and consequently to break the single image contract
+# we pass it only to the assembler. This option is utilized only in case of non
+# integrated assemblers.
+branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+KBUILD_CFLAGS += $(branch-prot-flags-y)
+endif
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
-- cgit v1.2.3
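To see the effect of these options, one can compile a trivial file like the
hypothetical example below (not part of the series) to assembly, e.g. with
"-O2 -S -mbranch-protection=pac-ret+leaf" on a sufficiently new GCC or
Clang. Both functions should then get a PACIASP at the start and an AUTIASP
before the return; with plain "pac-ret" (without "+leaf"), a leaf function
that never spills LR is typically left unsigned. On hardware without
pointer authentication these hint-space instructions behave as NOPs.

/* pac_demo.c - hypothetical demo translation unit, not kernel code */

int leaf(int x)
{
	/* Leaf function: does not save LR to the stack */
	return x * 2 + 1;
}

int caller(int (*fn)(int), int x)
{
	/* Non-leaf: LR is spilled, so its return address is signed */
	return fn(x) + leaf(x);
}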
From 3b446c7d27ddd06342901bb35211363f6944291a Mon Sep 17 00:00:00 2001
From: Nick Desaulniers
Date: Thu, 19 Mar 2020 11:19:51 -0700
Subject: arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH

Clang relies on GNU as from binutils to assemble the Linux kernel,
currently. A recent patch to enable the armv8.3-a extension for pointer
authentication checked for compiler support of the relevant flags.
Everything works with binutils 2.34+, but for older versions we observe
assembler errors:

/tmp/vgettimeofday-36a54b.s: Assembler messages:
/tmp/vgettimeofday-36a54b.s:40: Error: unknown pseudo-op: `.cfi_negate_ra_state'

When compiling with Clang, require the assembler to support
.cfi_negate_ra_state directives, in order to support CONFIG_ARM64_PTR_AUTH.

Link: https://github.com/ClangBuiltLinux/linux/issues/938
Signed-off-by: Nick Desaulniers
Signed-off-by: Catalin Marinas
Reviewed-by: Nathan Chancellor
Tested-by: Nathan Chancellor
---
 arch/arm64/Kconfig | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 155041a5f0e4..c876afce10f3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1500,6 +1500,7 @@ config ARM64_PTR_AUTH
 	default y
 	depends on !KVM || ARM64_VHE
 	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+	depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
 	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Pointer authentication (part of the ARMv8.3 Extensions) provides
@@ -1544,6 +1545,9 @@ config CC_HAS_SIGN_RETURN_ADDRESS
 config AS_HAS_PAC
 	def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
 
+config AS_HAS_CFI_NEGATE_RA_STATE
+	def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
 endmenu
 
 menu "ARMv8.5 architectural features"
-- cgit v1.2.3