author     Catalin Marinas <catalin.marinas@arm.com>  2021-08-31 11:10:00 +0300
committer  Catalin Marinas <catalin.marinas@arm.com>  2021-08-31 11:10:00 +0300
commit     65266a7c6abfa1ad915a362c41bf38576607f1f9 (patch)
tree       046d86fc88c50fcdeaddb9ab4c709ae0878b1e3f /arch/arm64/kernel
parent     1a7f67e618d42e9870dcd9fb0c7b2682d71fd631 (diff)
parent     702f43872665e3b1cc6fdb77d238533274fc9d18 (diff)
Merge remote-tracking branch 'tip/sched/arm64' into for-next/core
* tip/sched/arm64: (785 commits)
  Documentation: arm64: describe asymmetric 32-bit support
  arm64: Remove logic to kill 32-bit tasks on 64-bit-only cores
  arm64: Hook up cmdline parameter to allow mismatched 32-bit EL0
  arm64: Advertise CPUs capable of running 32-bit applications in sysfs
  arm64: Prevent offlining first CPU with 32-bit EL0 on mismatched system
  arm64: exec: Adjust affinity for compat tasks with mismatched 32-bit EL0
  arm64: Implement task_cpu_possible_mask()
  sched: Introduce dl_task_check_affinity() to check proposed affinity
  sched: Allow task CPU affinity to be restricted on asymmetric systems
  sched: Split the guts of sched_setaffinity() into a helper function
  sched: Introduce task_struct::user_cpus_ptr to track requested affinity
  sched: Reject CPU affinity changes based on task_cpu_possible_mask()
  cpuset: Cleanup cpuset_cpus_allowed_fallback() use in select_fallback_rq()
  cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus()
  cpuset: Don't use the cpu_possible_mask as a last resort for cgroup v1
  sched: Introduce task_cpu_possible_mask() to limit fallback rq selection
  sched: Cgroup SCHED_IDLE support
  sched/topology: Skip updating masks for non-online nodes
  Linux 5.14-rc6
  lib: use PFN_PHYS() in devmem_is_allowed()
  ...
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cpufeature.c  | 51
-rw-r--r--  arch/arm64/kernel/kaslr.c       |  4
-rw-r--r--  arch/arm64/kernel/process.c     | 47
-rw-r--r--  arch/arm64/kernel/ptrace.c      |  2
-rw-r--r--  arch/arm64/kernel/signal.c      | 29
-rw-r--r--  arch/arm64/kernel/stacktrace.c  |  2
-rw-r--r--  arch/arm64/kernel/syscall.c     |  9
7 files changed, 96 insertions, 48 deletions
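
The series merged above advertises which CPUs can run 32-bit EL0 code via a new sysfs file (see the cpufeature.c hunk below). A minimal userspace sketch for reading it, assuming the file lands at /sys/devices/system/cpu/aarch32_el0 (implied by device_create_file(cpu_subsys.dev_root, ...)) and noting that it only exists when booting with allow_mismatched_32bit_el0:

/*
 * Standalone userspace sketch, not part of this diff. The sysfs path is an
 * assumption based on the device_create_file() call in cpufeature.c.
 */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/aarch32_el0", "r");

	if (!f) {
		/* Symmetric system, or allow_mismatched_32bit_el0 not set. */
		perror("aarch32_el0");
		return 1;
	}

	if (fgets(buf, sizeof(buf), f))
		printf("CPUs capable of 32-bit EL0: %s", buf);

	fclose(f);
	return 0;
}
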
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 23b1e3d83603..b2770d753ba3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/mm.h>
@@ -1321,6 +1322,31 @@ const struct cpumask *system_32bit_el0_cpumask(void)
return cpu_possible_mask;
}
+static int __init parse_32bit_el0_param(char *str)
+{
+ allow_mismatched_32bit_el0 = true;
+ return 0;
+}
+early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
+
+static ssize_t aarch32_el0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *mask = system_32bit_el0_cpumask();
+
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
+}
+static const DEVICE_ATTR_RO(aarch32_el0);
+
+static int __init aarch32_el0_sysfs_init(void)
+{
+ if (!allow_mismatched_32bit_el0)
+ return 0;
+
+ return device_create_file(cpu_subsys.dev_root, &dev_attr_aarch32_el0);
+}
+device_initcall(aarch32_el0_sysfs_init);
+
static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
{
if (!has_cpuid_feature(entry, scope))
@@ -2902,15 +2928,38 @@ void __init setup_cpu_features(void)
static int enable_mismatched_32bit_el0(unsigned int cpu)
{
+ /*
+ * The first 32-bit-capable CPU we detected and so can no longer
+ * be offlined by userspace. -1 indicates we haven't yet onlined
+ * a 32-bit-capable CPU.
+ */
+ static int lucky_winner = -1;
+
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
if (cpu_32bit) {
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
- setup_elf_hwcaps(compat_elf_hwcaps);
}
+ if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
+ return 0;
+
+ if (lucky_winner >= 0)
+ return 0;
+
+ /*
+ * We've detected a mismatch. We need to keep one of our CPUs with
+ * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
+ * every CPU in the system for a 32-bit task.
+ */
+ lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
+ cpu_active_mask);
+ get_cpu_device(lucky_winner)->offline_disabled = true;
+ setup_elf_hwcaps(compat_elf_hwcaps);
+ pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
+ cpu, lucky_winner);
return 0;
}
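
For readers following the "lucky winner" logic in enable_mismatched_32bit_el0() above, here is a standalone model of that decision. The topology, NR_CPUS value and helper shape are made up for illustration; the real code operates on cpumasks and runs as a CPU hotplug callback.

/* Assumption-laden model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool cpu_32bit_el0_mask[NR_CPUS];
static int lucky_winner = -1;

static void online_cpu(int cpu, bool cpu_32bit)
{
	if (cpu_32bit)
		cpu_32bit_el0_mask[cpu] = true;

	/* No mismatch yet: this CPU agrees with CPU 0's capability. */
	if (cpu_32bit_el0_mask[0] == cpu_32bit)
		return;

	/* Only the first observed mismatch elects a winner. */
	if (lucky_winner >= 0)
		return;

	if (cpu_32bit) {
		lucky_winner = cpu;
	} else {
		/* Pick any already-online 32-bit-capable CPU. */
		for (int i = 0; i < NR_CPUS; i++) {
			if (cpu_32bit_el0_mask[i]) {
				lucky_winner = i;
				break;
			}
		}
	}

	printf("mismatch at CPU %d; hot-unplug disabled on CPU %d\n",
	       cpu, lucky_winner);
}

int main(void)
{
	/* Example: CPUs 0-3 are 64-bit-only, CPUs 4-7 also run 32-bit EL0. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		online_cpu(cpu, cpu >= 4);
	return 0;
}
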
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index cfa2cfde3019..418b2bba1521 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void)
* a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
* _stext) . This guarantees that the resulting region still
* covers [_stext, _etext], and that all relative branches can
- * be resolved without veneers.
+ * be resolved without veneers unless this region is exhausted
+ * and we fall back to a larger 2GB window in module_alloc()
+ * when ARM64_MODULE_PLTS is enabled.
*/
module_range = MODULES_VSIZE - (u64)(_etext - _stext);
module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
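
For reference, a worked sketch of the two assignments in the hunk above with illustrative values; this is not the full kaslr_early_init() flow, and the MODULES_VSIZE, _stext, _etext and offset numbers below are assumptions chosen only to show the arithmetic.

#include <stdint.h>
#include <stdio.h>

#define MODULES_VSIZE	(128ULL * 1024 * 1024)	/* 128 MiB, as upstream */

int main(void)
{
	uint64_t _stext = 0xffff800010000000ULL;	/* illustrative */
	uint64_t _etext = _stext + 0x01200000ULL;	/* ~18 MiB of text */
	uint64_t offset = 0x00340000ULL;		/* kernel KASLR offset */

	/* Shrink the window so [_stext, _etext] always stays covered. */
	uint64_t module_range = MODULES_VSIZE - (_etext - _stext);
	uint64_t module_alloc_base = _etext + offset - MODULES_VSIZE;

	printf("module_alloc_base = 0x%llx, range = 0x%llx bytes\n",
	       (unsigned long long)module_alloc_base,
	       (unsigned long long)module_range);
	return 0;
}
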
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5464d575192b..2bd270cd603e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
+#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
@@ -472,15 +473,6 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
-static void compat_thread_switch(struct task_struct *next)
-{
- if (!is_compat_thread(task_thread_info(next)))
- return;
-
- if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
- set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
-}
-
/*
* __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
* this function must be called with preemption disabled and the update to
@@ -515,7 +507,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
- compat_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -576,6 +567,28 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ~0xf;
}
+#ifdef CONFIG_COMPAT
+int compat_elf_check_arch(const struct elf32_hdr *hdr)
+{
+ if (!system_supports_32bit_el0())
+ return false;
+
+ if ((hdr)->e_machine != EM_ARM)
+ return false;
+
+ if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
+ return false;
+
+ /*
+ * Prevent execve() of a 32-bit program from a deadline task
+ * if the restricted affinity mask would be inadmissible on an
+ * asymmetric system.
+ */
+ return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ !dl_task_check_affinity(current, system_32bit_el0_cpumask());
+}
+#endif
+
/*
* Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
*/
@@ -585,8 +598,20 @@ void arch_setup_new_exec(void)
if (is_compat_task()) {
mmflags = MMCF_AARCH32;
+
+ /*
+ * Restrict the CPU affinity mask for a 32-bit task so that
+ * it contains only 32-bit-capable CPUs.
+ *
+ * From the perspective of the task, this looks similar to
+ * what would happen if the 64-bit-only CPUs were hot-unplugged
+ * at the point of execve(), although we try a bit harder to
+ * honour the cpuset hierarchy.
+ */
if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
- set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ force_compatible_cpus_allowed_ptr(current);
+ } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+ relax_compatible_cpus_allowed_ptr(current);
}
current->mm->context.flags = mmflags;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 956e2df27180..e26196a33cf4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1867,7 +1867,7 @@ void syscall_trace_exit(struct pt_regs *regs)
audit_syscall_exit(regs);
if (flags & _TIF_SYSCALL_TRACEPOINT)
- trace_sys_exit(regs, regs_return_value(regs));
+ trace_sys_exit(regs, syscall_get_return_value(current, regs));
if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index e93ffd7d38e1..fb54fb76e17f 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -29,6 +29,7 @@
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
+#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>
@@ -895,7 +896,7 @@ static void do_signal(struct pt_regs *regs)
retval == -ERESTART_RESTARTBLOCK ||
(retval == -ERESTARTSYS &&
!(ksig.ka.sa.sa_flags & SA_RESTART)))) {
- regs->regs[0] = -EINTR;
+ syscall_set_return_value(current, regs, -EINTR, 0);
regs->pc = continue_addr;
}
@@ -916,19 +917,6 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
-static bool cpu_affinity_invalid(struct pt_regs *regs)
-{
- if (!compat_user_mode(regs))
- return false;
-
- /*
- * We're preemptible, but a reschedule will cause us to check the
- * affinity again.
- */
- return !cpumask_test_cpu(raw_smp_processor_id(),
- system_32bit_el0_cpumask());
-}
-
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
do {
@@ -955,19 +943,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
if (thread_flags & _TIF_NOTIFY_RESUME) {
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
-
- /*
- * If we reschedule after checking the affinity
- * then we must ensure that TIF_NOTIFY_RESUME
- * is set so that we check the affinity again.
- * Since tracehook_notify_resume() clears the
- * flag, ensure that the compiler doesn't move
- * it after the affinity check.
- */
- barrier();
-
- if (cpu_affinity_invalid(regs))
- force_sig(SIGKILL);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index b83c8d911930..8982a2b78acf 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
#ifdef CONFIG_STACKTRACE
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 263d6c1a525f..50a0f1a38e84 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
ret = do_ni_syscall(regs, scno);
}
- if (is_compat_task())
- ret = lower_32_bits(ret);
-
- regs->regs[0] = ret;
+ syscall_set_return_value(current, regs, 0, ret);
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
@@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* syscall. do_notify_resume() will send a signal to userspace
* before the syscall is restarted.
*/
- regs->regs[0] = -ERESTARTNOINTR;
+ syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
return;
}
@@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* anyway.
*/
if (scno == NO_SYSCALL)
- regs->regs[0] = -ENOSYS;
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
scno = syscall_trace_enter(regs);
if (scno == NO_SYSCALL)
goto trace_exit;
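
The open-coded writes to regs->regs[0] above are replaced by syscall_set_return_value(). As the removed lower_32_bits() lines show, any value written back for a compat task must be narrowed to 32 bits, so routing every write through one helper keeps syscall exit, signals and ptrace consistent. A standalone model of that behaviour (not the kernel helper itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* error: negative errno or 0; val: the success value. */
static uint64_t set_return_value(bool compat, int error, long val)
{
	if (error)
		val = error;
	if (compat)
		val = (uint32_t)val;	/* lower_32_bits() */
	return (uint64_t)val;
}

int main(void)
{
	/* -EINTR (-4) as seen by a 64-bit task and by a compat task. */
	printf("native x0 = %#lx\n",
	       (unsigned long)set_return_value(false, -4, 0));
	printf("compat w0 = %#lx\n",
	       (unsigned long)set_return_value(true, -4, 0));
	return 0;
}
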