summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorAndrea Righi <arighi@nvidia.com>2025-05-15 22:11:43 +0300
committerTejun Heo <tj@kernel.org>2025-05-20 23:24:05 +0300
commit686d1337237161833684d8554c6e3ea2d692bd41 (patch)
treea6049e6c59698850afb8b8a7df429ac516c9daee /kernel
parent617a77018f683905ed4e8cd693df51243908353f (diff)
downloadlinux-686d1337237161833684d8554c6e3ea2d692bd41.tar.xz
sched_ext: idle: Validate locking correctness in scx_bpf_select_cpu_and()
Validate locking correctness when accessing p->nr_cpus_allowed and p->cpus_ptr inside scx_bpf_select_cpu_and(): if the rq lock is held, access is safe; otherwise, require that p->pi_lock is held. This allows catching potentially unsafe calls to scx_bpf_select_cpu_and(). Signed-off-by: Andrea Righi <arighi@nvidia.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/ext_idle.c10
1 files changed, 10 insertions, 0 deletions
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index f0ebf8b5b908..716863f1f8ce 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -935,6 +935,7 @@ prev_cpu:
__bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags)
{
+ struct rq *rq;
s32 cpu;
if (!kf_cpu_valid(prev_cpu, NULL))
@@ -946,6 +947,15 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64
if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
return -EPERM;
+ /*
+ * Validate locking correctness to access p->cpus_ptr and
+ * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
+ * otherwise, assert that p->pi_lock is held.
+ */
+ rq = scx_locked_rq();
+ if (!rq)
+ lockdep_assert_held(&p->pi_lock);
+
#ifdef CONFIG_SMP
/*
* This may also be called from ops.enqueue(), so we need to handle