Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	39
1 file changed, 28 insertions, 11 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ca2bb629595f..79ddba55b123 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1922,6 +1922,24 @@ static int migration_cpu_stop(void *data)
rq_lock(rq, &rf);
pending = p->migration_pending;
+ if (pending && !arg->pending) {
+ /*
+ * This happens from sched_exec() and migrate_task_to(),
+ * neither of them cares about pending and just wants a task to
+ * maybe move about.
+ *
+ * Even if there is a pending, we can ignore it, since
+ * affine_move_task() will have its own stop_work in flight
+ * which will manage the completion.
+ *
+ * Notably, pending doesn't need to match arg->pending. This can
+ * happen when triple concurrent affine_move_task() first sets
+ * pending, then clears pending and eventually sets another
+ * pending.
+ */
+ pending = NULL;
+ }
+
/*
* If task_rq(p) != rq, it cannot be migrated here, because we're
* holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
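
For context, here is a minimal sketch of the two structures this patch ties together, reconstructed only from the identifiers visible in the diff; field order and any members not referenced here are assumptions, not the verbatim kernel definitions:

	struct set_affinity_pending;

	/* Argument handed to the migration_cpu_stop() stopper callback. */
	struct migration_arg {
		struct task_struct		*task;
		int				dest_cpu;
		/* NULL when queued from sched_exec() / migrate_task_to() */
		struct set_affinity_pending	*pending;
	};

	/* Per-request state installed by affine_move_task(). */
	struct set_affinity_pending {
		refcount_t			refs;
		struct completion		done;
		struct cpu_stop_work		stop_work;
		struct migration_arg		arg;	/* embedded by this patch */
	};

With the migration_arg embedded in the pending request, migration_cpu_stop() can tell a stopper queued by affine_move_task() (arg->pending set) apart from one queued by callers that merely want the task moved (arg->pending == NULL), which is what the check added above relies on.
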
@@ -2194,10 +2212,6 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
int dest_cpu, unsigned int flags)
{
struct set_affinity_pending my_pending = { }, *pending = NULL;
- struct migration_arg arg = {
- .task = p,
- .dest_cpu = dest_cpu,
- };
bool complete = false;
/* Can the task run on the task's current CPU? If so, we're done */
@@ -2235,6 +2249,12 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
/* Install the request */
refcount_set(&my_pending.refs, 1);
init_completion(&my_pending.done);
+ my_pending.arg = (struct migration_arg) {
+ .task = p,
+ .dest_cpu = -1, /* any */
+ .pending = &my_pending,
+ };
+
p->migration_pending = &my_pending;
} else {
pending = p->migration_pending;
@@ -2265,12 +2285,6 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
p->migration_flags &= ~MDF_PUSH;
task_rq_unlock(rq, p, rf);
- pending->arg = (struct migration_arg) {
- .task = p,
- .dest_cpu = -1,
- .pending = pending,
- };
-
stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
&pending->arg, &pending->stop_work);
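
Taken together, the two hunks above consolidate the request setup: the embedded migration_arg is now filled in once, where the pending request is installed, instead of immediately before queueing the stopper. A sketch of the resulting caller-side sequence, assuming the surrounding locking is unchanged (dest_cpu == -1 meaning "any" is taken from the hunk above):

	/* Install the request: one reference for the waiter. */
	refcount_set(&my_pending.refs, 1);
	init_completion(&my_pending.done);
	my_pending.arg = (struct migration_arg) {
		.task		= p,
		.dest_cpu	= -1,		/* any */
		.pending	= &my_pending,
	};
	p->migration_pending = &my_pending;

	...

	/* Queue the stopper using the storage embedded in the request. */
	stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
			    &pending->arg, &pending->stop_work);

Because the arg now lives inside *pending rather than on the caller's stack, every path that queues migration_cpu_stop() for this request can reuse it, which is what the final hunk below depends on.
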
@@ -2283,8 +2297,11 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
* is_migration_disabled(p) checks to the stopper, which will
* run on the same CPU as said p.
*/
+ refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
task_rq_unlock(rq, p, rf);
- stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
} else {
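
The last hunk replaces the synchronous stop_one_cpu() call, which could rely on its on-stack migration_arg surviving until the stopper finished, with the non-blocking stop_one_cpu_nowait(). A short annotated sketch of the resulting lifetime rule, as far as it can be inferred from this diff (the stopper-side release is an assumption and not shown here):

	/*
	 * The caller no longer waits for the stopper here, so pin the
	 * request: the extra reference keeps pending->{arg,stop_work}
	 * valid until migration_cpu_stop() has consumed them.
	 */
	refcount_inc(&pending->refs);
	task_rq_unlock(rq, p, rf);

	stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
			    &pending->arg, &pending->stop_work);

	/*
	 * A matching reference drop on the stopper/completion side is
	 * implied but falls outside the lines shown in this hunk.
	 */
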