Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/cpufeature.h |  5 +++++
-rw-r--r--	arch/arm64/include/asm/processor.h  |  1 +
-rw-r--r--	arch/arm64/kernel/fpsimd.c          |  4 ++++
-rw-r--r--	arch/arm64/kernel/process.c         | 14 ++++++++++++--
4 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5ddfae233ea5..14a8f3d93add 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -778,6 +778,11 @@ static __always_inline bool system_supports_fa64(void)
 		cpus_have_const_cap(ARM64_SME_FA64);
 }
 
+static __always_inline bool system_supports_tpidr2(void)
+{
+	return system_supports_sme();
+}
+
 static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
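
The new helper mirrors the neighbouring system_supports_*() checks and simply reports SME support, since TPIDR2_EL0 is only present with FEAT_SME. Callers are expected to gate every TPIDR2 access on it, as the process.c hunks below do. A minimal sketch of the intended pattern (the save_tpidr2() wrapper is hypothetical, not part of this patch):

	/* Hypothetical caller: only touch TPIDR2_EL0 when SME is implemented. */
	static void save_tpidr2(struct task_struct *tsk)
	{
		if (!system_supports_tpidr2())
			return;

		tsk->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
	}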
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 7a57cbff8a03..849e97d418a8 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -169,6 +169,7 @@ struct thread_struct {
 	u64			mte_ctrl;
 #endif
 	u64			sctlr_user;
+	u64			tpidr2_el0;
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 39f44fcb9b99..231f2d85b65e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1098,6 +1098,10 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 	/* Allow SME in kernel */
 	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
 	isb();
+
+	/* Allow EL0 to access TPIDR2 */
+	write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
+	isb();
 }
 
 /*
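
Setting SCTLR_EL1.EnTP2 here is what lets EL0 read and write TPIDR2_EL0 directly; without it the accesses trap to EL1. A userspace sketch of what that enables, assuming SME-capable hardware and a kernel with this patch applied (the raw S3_3_C13_C0_5 encoding is used in case the assembler does not know the TPIDR2_EL0 name; on unsupported systems the accesses fault):

	#include <stdint.h>
	#include <stdio.h>

	/* Read TPIDR2_EL0 from EL0 (traps, giving SIGILL, if EnTP2 is clear). */
	static inline uint64_t read_tpidr2(void)
	{
		uint64_t val;

		asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (val));
		return val;
	}

	/* Write TPIDR2_EL0 from EL0. */
	static inline void write_tpidr2(uint64_t val)
	{
		asm volatile("msr S3_3_C13_C0_5, %0" :: "r" (val));
	}

	int main(void)
	{
		write_tpidr2(0x1234);
		printf("TPIDR2_EL0 = %#llx\n", (unsigned long long)read_tpidr2());
		return 0;
	}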
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7fa97df55e3a..e20571f19718 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -250,6 +250,8 @@ void show_regs(struct pt_regs *regs)
 static void tls_thread_flush(void)
 {
 	write_sysreg(0, tpidr_el0);
+	if (system_supports_tpidr2())
+		write_sysreg_s(0, SYS_TPIDR2_EL0);
 
 	if (is_compat_task()) {
 		current->thread.uw.tp_value = 0;
@@ -343,6 +345,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		 * out-of-sync with the saved value.
 		 */
 		*task_user_tls(p) = read_sysreg(tpidr_el0);
+		if (system_supports_tpidr2())
+			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
@@ -353,10 +357,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 		/*
 		 * If a TLS pointer was passed to clone, use it for the new
-		 * thread.
+		 * thread. We also reset TPIDR2 if it's in use.
 		 */
-		if (clone_flags & CLONE_SETTLS)
+		if (clone_flags & CLONE_SETTLS) {
 			p->thread.uw.tp_value = tls;
+			p->thread.tpidr2_el0 = 0;
+		}
 	} else {
 		/*
 		 * A kthread has no context to ERET to, so ensure any buggy
@@ -387,6 +393,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 void tls_preserve_current_state(void)
 {
 	*task_user_tls(current) = read_sysreg(tpidr_el0);
+	if (system_supports_tpidr2() && !is_compat_task())
+		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 }
 
 static void tls_thread_switch(struct task_struct *next)
@@ -399,6 +407,8 @@ static void tls_thread_switch(struct task_struct *next)
 	write_sysreg(0, tpidrro_el0);
 
 	write_sysreg(*task_user_tls(next), tpidr_el0);
+	if (system_supports_tpidr2())
+		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
 }
 
 /*
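
Taken together, the process.c hooks give TPIDR2_EL0 a TLS-like lifetime: it is saved and restored per thread across context switches, inherited on fork() (copy_thread() snapshots the parent's live value when CLONE_SETTLS is not passed), reset on clone() with CLONE_SETTLS, and zeroed when the TLS state is flushed in tls_thread_flush(). A userspace sketch of that behaviour, assuming SME hardware and a kernel with this patch; the helpers reuse the raw register encoding from the earlier sketch:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static inline uint64_t read_tpidr2(void)
	{
		uint64_t val;

		asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (val));
		return val;
	}

	static inline void write_tpidr2(uint64_t val)
	{
		asm volatile("msr S3_3_C13_C0_5, %0" :: "r" (val));
	}

	int main(void)
	{
		pid_t pid;

		write_tpidr2(0xdead);

		pid = fork();
		if (pid == 0) {
			/* Child: expects the parent's value, copied by copy_thread(). */
			printf("child  TPIDR2_EL0 = %#llx\n",
			       (unsigned long long)read_tpidr2());
			exit(0);
		}

		waitpid(pid, NULL, 0);
		/* Parent: value preserved across the context switches above. */
		printf("parent TPIDR2_EL0 = %#llx\n",
		       (unsigned long long)read_tpidr2());
		return 0;
	}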