author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-12-16 20:04:38 +0300
committer  Ingo Molnar <mingo@elte.hu>               2009-12-16 21:01:57 +0300
commit     5da9a0fb673a0ea0a093862f95f6b89b3390c31e (patch)
tree       fa4465d5ce050cdc42e63a2518ce2e1b58fa9c30 /kernel/sched.c
parent     3802290628348674985d14914f9bfee7b9084548 (diff)
download   linux-5da9a0fb673a0ea0a093862f95f6b89b3390c31e.tar.xz
sched: Fix select_task_rq() vs hotplug issues
Since select_task_rq() is now responsible for guaranteeing
->cpus_allowed and cpu_active_mask, we need to verify this.

select_task_rq_rt() can blindly return smp_processor_id()/task_cpu()
without checking the valid masks, select_task_rq_fair() can do the
same in the rare case that all SD_flags are disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.961475466@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
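For illustration, here is a minimal userspace sketch of the fallback cascade the
patch introduces in select_fallback_rq(): prefer an allowed, active CPU on the
task's node; failing that, any allowed active CPU; failing that, widen the
allowed mask to the cpuset and pick again. Plain uint64_t bitmasks stand in for
struct cpumask, and every name here (fallback_cpu, any_and, the mask variables)
is invented for the sketch, not a kernel symbol.

/* Userspace approximation only; not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Lowest CPU set in both masks, or -1 if the intersection is empty. */
static int any_and(uint64_t a, uint64_t b)
{
    uint64_t both = a & b;
    return both ? __builtin_ctzll(both) : -1;
}

static int fallback_cpu(uint64_t node_mask, uint64_t active_mask,
                        uint64_t *allowed_mask, uint64_t cpuset_mask)
{
    int cpu;

    /* 1) Look for an allowed, active CPU on the same node. */
    cpu = any_and(node_mask & active_mask, *allowed_mask);
    if (cpu >= 0)
        return cpu;

    /* 2) Any allowed, active CPU at all? */
    cpu = any_and(*allowed_mask, active_mask);
    if (cpu >= 0)
        return cpu;

    /*
     * 3) "No more Mr. Nice Guy": widen the allowed mask to the
     *    cpuset, as the kernel does via cpuset_cpus_allowed_locked().
     */
    *allowed_mask = cpuset_mask;
    return any_and(active_mask, *allowed_mask);
}

int main(void)
{
    uint64_t allowed = 1u << 3; /* task bound to CPU 3 ...            */
    uint64_t active  = 0x07;    /* ... but only CPUs 0-2 still active */
    int cpu = fallback_cpu(0x03 /* node holds CPUs 0-1 */, active,
                           &allowed, 0xff /* cpuset: all CPUs */);

    printf("fell back to cpu%d\n", cpu); /* prints cpu0 */
    return 0;
}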
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 75
1 file changed, 40 insertions(+), 35 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 63e55ac242d1..cc40bdadee7a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2317,6 +2317,43 @@ void task_oncpu_function_call(struct task_struct *p,
}
#ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+ int dest_cpu;
+ const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ return dest_cpu;
+
+ /* Any allowed, online CPU? */
+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ return dest_cpu;
+
+ /* No more Mr. Nice Guy. */
+ if (dest_cpu >= nr_cpu_ids) {
+ rcu_read_lock();
+ cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+ rcu_read_unlock();
+ dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+ printk(KERN_INFO "process %d (%s) no "
+ "longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+
+ return dest_cpu;
+}
+
/*
* Called from:
*
@@ -2343,14 +2380,8 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
* not worry about this generic constraint ]
*/
if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
- !cpu_active(cpu))) {
-
- cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
- /*
- * XXX: race against hot-plug modifying cpu_active_mask
- */
- BUG_ON(cpu >= nr_cpu_ids);
- }
+ !cpu_active(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
return cpu;
}
@@ -7319,36 +7350,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
int dest_cpu;
- const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
again:
- /* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
- goto move;
-
- /* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
- if (dest_cpu < nr_cpu_ids)
- goto move;
-
- /* No more Mr. Nice Guy. */
- if (dest_cpu >= nr_cpu_ids) {
- cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
- pr_info("process %d (%s) no longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
- }
- }
+ dest_cpu = select_fallback_rq(dead_cpu, p);
-move:
/* It can have affinity changed while we were choosing. */
if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
goto again;
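The retained retry loop above is worth spelling out: the destination is chosen
without anything pinning the task's affinity, so __migrate_task_irq() re-checks
it and the caller simply picks again on failure. Below is a hedged toy sketch
of that choose/verify/retry pattern; all names (toy_task, pick_cpu,
try_migrate) are invented for illustration, and only the control flow mirrors
move_task_off_dead_cpu().

/* Userspace approximation only; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
    unsigned allowed;   /* bitmask of CPUs the task may run on */
    int cpu;            /* where the task currently sits */
};

static int pick_cpu(unsigned allowed, unsigned active)
{
    unsigned both = allowed & active;
    return both ? __builtin_ctz(both) : -1;
}

/*
 * Fails when dest has dropped out of the allowed mask since it was
 * chosen, mimicking __migrate_task_irq() observing a concurrent
 * affinity change.
 */
static bool try_migrate(struct toy_task *t, int dest)
{
    if (!(t->allowed & (1u << dest)))
        return false;
    t->cpu = dest;
    return true;
}

int main(void)
{
    struct toy_task t = { .allowed = 0x6, .cpu = 1 }; /* CPUs 1-2 */
    unsigned active = 0xe;                            /* CPUs 1-3 */
    int dest;

again:
    dest = pick_cpu(t.allowed, active);
    if (dest < 0)
        return 1;        /* nowhere left to go in this toy */
    t.allowed = 0x4;     /* affinity change lands between choose and move */
    if (!try_migrate(&t, dest))
        goto again;      /* first pick (cpu1) went stale; choose again */

    printf("task moved to cpu%d\n", t.cpu); /* cpu2, on the retry */
    return 0;
}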