author     Oleg Nesterov <oleg@redhat.com>    2015-11-17 20:05:23 +0300
committer  Ingo Molnar <mingo@kernel.org>     2015-11-23 11:48:18 +0300
commit     1b034bd989aa4a396c13d305759c376c52595a97
tree       ea51c57f06f8d3e61e2a19a712db9d7b28d9bebb  /kernel/stop_machine.c
parent     6a19005157c464b47b2082f2617d12bc11198a0d
stop_machine: Make cpu_stop_queue_work() and stop_one_cpu_nowait() return bool
Change cpu_stop_queue_work() to return true if the work was queued, and change stop_one_cpu_nowait() to return the result of cpu_stop_queue_work().

This makes them more useful; for example, now you can allocate a cpu_stop_work for stop_one_cpu_nowait() and free it in the callback or when stop_one_cpu_nowait() fails. Currently this is impossible because you can't know whether @fn will be called or not.

Also, this allows us to kill cpu_stop_done->executed; see the next changes.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Milos Vyletel <milos@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20151117170523.GA13955@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
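[Editor's sketch] The new return value enables the allocation pattern the changelog describes: heap-allocate the cpu_stop_work, queue it with stop_one_cpu_nowait(), and free it either in the callback or when queuing fails. A minimal, hedged sketch; my_work, my_stop_fn and queue_my_work are hypothetical names, not part of this patch:

#include <linux/slab.h>
#include <linux/stop_machine.h>

/* Hypothetical wrapper: embeds the cpu_stop_work next to the caller's data. */
struct my_work {
	struct cpu_stop_work stop_work;
	void *data;
};

/* Runs in stopper context on the target CPU; frees the wrapper itself. */
static int my_stop_fn(void *arg)
{
	struct my_work *mw = arg;

	/* ... act on mw->data ... */

	kfree(mw);
	return 0;
}

static int queue_my_work(unsigned int cpu, void *data)
{
	struct my_work *mw = kmalloc(sizeof(*mw), GFP_KERNEL);

	if (!mw)
		return -ENOMEM;
	mw->data = data;

	/* With the bool return we now know whether @fn will ever run. */
	if (!stop_one_cpu_nowait(cpu, my_stop_fn, mw, &mw->stop_work)) {
		kfree(mw);	/* stopper offline: my_stop_fn will never be called */
		return -ENOENT;
	}
	return 0;
}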
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--  kernel/stop_machine.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 17f01a9dc3df..0ec1f16da379 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -81,17 +81,21 @@ static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
}
/* queue @work to @stopper. if offline, @work is completed immediately */
-static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
unsigned long flags;
+ bool enabled;
spin_lock_irqsave(&stopper->lock, flags);
- if (stopper->enabled)
+ enabled = stopper->enabled;
+ if (enabled)
__cpu_stop_queue_work(stopper, work);
else
cpu_stop_signal_done(work->done, false);
spin_unlock_irqrestore(&stopper->lock, flags);
+
+ return enabled;
}
/**
@@ -297,12 +301,16 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
*
* CONTEXT:
* Don't care.
+ *
+ * RETURNS:
+ * true if cpu_stop_work was queued successfully and @fn will be called,
+ * false otherwise.
*/
-void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf)
{
*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
- cpu_stop_queue_work(cpu, work_buf);
+ return cpu_stop_queue_work(cpu, work_buf);
}
/* static data for stop_cpus */
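[Editor's sketch] For callers that embed the cpu_stop_work instead of allocating it, the new bool return still lets them detect, and at least report, that the target CPU's stopper was offline and the callback will not run. Another hedged sketch; kick_fn, kick_work and kick_cpu are hypothetical names, not part of this patch:

#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/stop_machine.h>

/* Runs in stopper context on the target CPU. */
static int kick_fn(void *arg)
{
	/* ... preempt whatever is running there and do the work ... */
	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);

static void kick_cpu(unsigned int cpu)
{
	if (!stop_one_cpu_nowait(cpu, kick_fn, NULL,
				 &per_cpu(kick_work, cpu)))
		pr_warn("stopper for CPU %u is offline, kick_fn will not run\n",
			cpu);
}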