author     Rob Clark <robdclark@chromium.org>              2020-12-15 06:03:14 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 23:13:36 +0300
commit     f630c7c6f10546ebff15c3a856e7949feb7a2372 (patch)
tree       b21e48738e7593486afdcfded9479a30f868a85a /kernel
parent     2c85ebc57b3e1817b6ce1a6b703928e113a90442 (diff)
kthread: add kthread_work tracepoints
While migrating some code from wq to kthread_worker, I found that I missed
the execute_start/end tracepoints.  So add similar tracepoints for
kthread_work.  And, for completeness, add a queue_work tracepoint (although
this one differs slightly from the matching workqueue tracepoint).

Link: https://lkml.kernel.org/r/20201010180323.126634-1-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Cc: Rob Clark <robdclark@chromium.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Phil Auld <pauld@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Thara Gopinath <thara.gopinath@linaro.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vincent Donnefort <vincent.donnefort@arm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ilias Stamatis <stamatis.iliass@gmail.com>
Cc: Liang Chen <cl@rock-chips.com>
Cc: Ben Dooks <ben.dooks@codethink.co.uk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "J. Bruce Fields" <bfields@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
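The trace event definitions themselves sit outside kernel/ (judging by the
sched_kthread_work_* naming, in the scheduler trace header), so they do not
show up in the diffstat below, which is limited to 'kernel'. As a sketch of
the usual TRACE_EVENT pattern such an event would follow, recording only the
work and function addresses, the execute_start event might look roughly like
this (field layout and format string are assumptions, not copied from the
patch):

	/*
	 * Sketch only: the real definitions added by this patch live in the
	 * trace header and may differ in detail.
	 */
	TRACE_EVENT(sched_kthread_work_execute_start,

		TP_PROTO(struct kthread_work *work),

		TP_ARGS(work),

		TP_STRUCT__entry(
			__field(void *, work)
			__field(void *, function)
		),

		TP_fast_assign(
			__entry->work = work;
			__entry->function = work->func;
		),

		/* %ps prints the symbol name for the recorded address */
		TP_printk("work struct %p: function %ps",
			  __entry->work, __entry->function)
	);

The execute_end counterpart takes the function pointer as a separate
argument because, as the comment in the diff below notes, the work item must
not be dereferenced once work->func() has returned.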
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/kthread.c   9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 933a625621b8..34516b0a6eb7 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -704,8 +704,15 @@ repeat:
 	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
+		kthread_work_func_t func = work->func;
 		__set_current_state(TASK_RUNNING);
+		trace_sched_kthread_work_execute_start(work);
 		work->func(work);
+		/*
+		 * Avoid dereferencing work after this point. The trace
+		 * event only cares about the address.
+		 */
+		trace_sched_kthread_work_execute_end(work, func);
 	} else if (!freezing(current))
 		schedule();
 
@@ -834,6 +841,8 @@ static void kthread_insert_work(struct kthread_worker *worker,
 {
 	kthread_insert_work_sanity_check(worker, work);
 
+	trace_sched_kthread_work_queue_work(worker, work);
+
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
 	if (!worker->current_work && likely(worker->task))
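For reference, a minimal kthread_worker user that would hit all three new
tracepoints looks roughly like the sketch below: queuing goes through
kthread_insert_work(), which now emits sched_kthread_work_queue_work, and
the worker loop brackets the callback with the execute_start/end events.
The callback, work item, and worker name (my_work_fn, my_work, "my-worker")
are made up for illustration; only the kthread_* calls are real kernel API.

	#include <linux/err.h>
	#include <linux/kthread.h>

	/* Hypothetical callback: execute_start/end fire around this call. */
	static void my_work_fn(struct kthread_work *work)
	{
		pr_info("kthread_work ran\n");
	}

	static struct kthread_worker *my_worker;
	static struct kthread_work my_work;

	static int my_setup(void)
	{
		/* Spawns a dedicated kthread running kthread_worker_fn(). */
		my_worker = kthread_create_worker(0, "my-worker");
		if (IS_ERR(my_worker))
			return PTR_ERR(my_worker);

		kthread_init_work(&my_work, my_work_fn);

		/* Ends up in kthread_insert_work() -> queue_work tracepoint. */
		kthread_queue_work(my_worker, &my_work);
		kthread_flush_work(&my_work);

		kthread_destroy_worker(my_worker);
		return 0;
	}

Once the events are compiled in, they can typically be enabled from tracefs
under the sched group (e.g. events/sched/sched_kthread_work_queue_work/enable),
assuming the definitions do live in the sched trace header.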