author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 19:37:41 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 19:37:41 +0300
commit    41cbc01f6e49e48bc3d78158cec0a2d4ff6c906d (patch)
tree      c96efc28f4b3b7d5b785fd4e4b44a0376adcfa12 /samples/trace_events/trace-events-sample.c
parent    12df4289ee8e4dccf932b7186b391bb6d2b915fa (diff)
parent    1e0d6714aceb770b04161fbedd7765d0e1fc27bd (diff)
Merge tag 'trace-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "The updates included in this pull request for ftrace are:

   o Several clean ups to the code

     One such clean up was to convert to 64 bit time keeping, in the
     ring buffer benchmark code.

   o Adding of __print_array() helper macro for TRACE_EVENT()

   o Updating the sample/trace_events/ to add samples of different ways
     to make trace events. Lots of features have been added since the
     sample code was made, and these features are mostly unknown.
     Developers have been making their own hacks to do things that are
     already available.

   o Performance improvements. Most notably, I found a performance bug
     where a waiter that is waiting for a full page from the ring buffer
     will see that a full page is not available, and go to sleep. The
     sched event caused by it going to sleep would cause it to wake up
     again. It would see that there was still not a full page, and go
     back to sleep again, and that would wake it up again, until finally
     it would see a full page. This change has been marked for stable.

     Other improvements include removing global locks from fast paths"

* tag 'trace-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ring-buffer: Do not wake up a splice waiter when page is not full
  tracing: Fix unmapping loop in tracing_mark_write
  tracing: Add samples of DECLARE_EVENT_CLASS() and DEFINE_EVENT()
  tracing: Add TRACE_EVENT_FN example
  tracing: Add TRACE_EVENT_CONDITION sample
  tracing: Update the TRACE_EVENT fields available in the sample code
  tracing: Separate out initializing top level dir from instances
  tracing: Make tracing_init_dentry_tr() static
  trace: Use 64-bit timekeeping
  tracing: Add array printing helper
  tracing: Remove newline from trace_printk warning banner
  tracing: Use IS_ERR() check for return value of tracing_init_dentry()
  tracing: Remove unneeded includes of debugfs.h and fs.h
  tracing: Remove taking of trace_types_lock in pipe files
  tracing: Add ref count to tracer for when they are being read by pipe
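The __print_array() helper mentioned above gives TRACE_EVENT() a way to print an array of fixed-size elements in its TP_printk() output. A minimal sketch of how it can be combined with a dynamic array follows; the event name, field names, and layout are made up for illustration and are not taken from this patch:

  TRACE_EVENT(foo_values,

	TP_PROTO(const int *values, int count),

	TP_ARGS(values, count),

	TP_STRUCT__entry(
		__field(int, count)
		__dynamic_array(int, values, count)
	),

	TP_fast_assign(
		__entry->count = count;
		memcpy(__get_dynamic_array(values), values,
		       count * sizeof(int));
	),

	/* __print_array(array_ptr, element_count, element_size) */
	TP_printk("count=%d values=%s", __entry->count,
		  __print_array(__get_dynamic_array(values),
				__entry->count, sizeof(int)))
  );

The helper takes the array pointer, the element count, and the element size, and renders the elements as a list when the trace buffer is read.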
Diffstat (limited to 'samples/trace_events/trace-events-sample.c')
-rw-r--r--  samples/trace_events/trace-events-sample.c  80
1 file changed, 79 insertions(+), 1 deletion(-)
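The diff below adds calls such as trace_foo_with_template_simple() and trace_foo_bar_with_cond(); those functions are generated by macros in trace-events-sample.h, which this pull extends with DECLARE_EVENT_CLASS()/DEFINE_EVENT() and TRACE_EVENT_CONDITION() samples. A hedged sketch of what such definitions typically look like (the field layout and the condition are illustrative, not copied from the header):

  /* A class describes the layout and output shared by several events. */
  DECLARE_EVENT_CLASS(foo_template,

	TP_PROTO(const char *foo, int bar),

	TP_ARGS(foo, bar),

	TP_STRUCT__entry(
		__string(foo, foo)
		__field(int, bar)
	),

	TP_fast_assign(
		__assign_str(foo, foo);
		__entry->bar = bar;
	),

	TP_printk("foo %s %d", __get_str(foo), __entry->bar)
  );

  /* Each DEFINE_EVENT() instantiates a trace_<name>() call from the class. */
  DEFINE_EVENT(foo_template, foo_with_template_simple,
	TP_PROTO(const char *foo, int bar),
	TP_ARGS(foo, bar));

  /* TRACE_EVENT_CONDITION() records the event only when the condition holds. */
  TRACE_EVENT_CONDITION(foo_bar_with_cond,

	TP_PROTO(const char *foo, int bar),

	TP_ARGS(foo, bar),

	TP_CONDITION(!(bar % 10)),	/* e.g. record only every 10th call */

	TP_STRUCT__entry(
		__string(foo, foo)
		__field(int, bar)
	),

	TP_fast_assign(
		__assign_str(foo, foo);
		__entry->bar = bar;
	),

	TP_printk("foo %s %d", __get_str(foo), __entry->bar)
  );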
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index aabc4e970911..880a7d1d27d2 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -10,12 +10,38 @@
#define CREATE_TRACE_POINTS
#include "trace-events-sample.h"
+static const char *random_strings[] = {
+ "Mother Goose",
+ "Snoopy",
+ "Gandalf",
+ "Frodo",
+ "One ring to rule them all"
+};
static void simple_thread_func(int cnt)
{
+ int array[6];
+ int len = cnt % 5;
+ int i;
+
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
- trace_foo_bar("hello", cnt);
+
+ for (i = 0; i < len; i++)
+ array[i] = i + 1;
+ array[i] = 0;
+
+ /* Silly tracepoints */
+ trace_foo_bar("hello", cnt, array, random_strings[len],
+ tsk_cpus_allowed(current));
+
+ trace_foo_with_template_simple("HELLO", cnt);
+
+ trace_foo_bar_with_cond("Some times print", cnt);
+
+ trace_foo_with_template_cond("prints other times", cnt);
+
+ trace_foo_with_template_print("I have to be different", cnt);
}
static int simple_thread(void *arg)
@@ -29,6 +55,53 @@ static int simple_thread(void *arg)
}
static struct task_struct *simple_tsk;
+static struct task_struct *simple_tsk_fn;
+
+static void simple_thread_func_fn(int cnt)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /* More silly tracepoints */
+ trace_foo_bar_with_fn("Look at me", cnt);
+ trace_foo_with_template_fn("Look at me too", cnt);
+}
+
+static int simple_thread_fn(void *arg)
+{
+ int cnt = 0;
+
+ while (!kthread_should_stop())
+ simple_thread_func_fn(cnt++);
+
+ return 0;
+}
+
+static DEFINE_MUTEX(thread_mutex);
+
+void foo_bar_reg(void)
+{
+ pr_info("Starting thread for foo_bar_fn\n");
+ /*
+ * We shouldn't be able to start a trace when the module is
+ * unloading (there's other locks to prevent that). But
+ * for consistency sake, we still take the thread_mutex.
+ */
+ mutex_lock(&thread_mutex);
+ simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ mutex_unlock(&thread_mutex);
+}
+
+void foo_bar_unreg(void)
+{
+ pr_info("Killing thread for foo_bar_fn\n");
+ /* protect against module unloading */
+ mutex_lock(&thread_mutex);
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ mutex_unlock(&thread_mutex);
+}
static int __init trace_event_init(void)
{
@@ -42,6 +115,11 @@ static int __init trace_event_init(void)
static void __exit trace_event_exit(void)
{
kthread_stop(simple_tsk);
+ mutex_lock(&thread_mutex);
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ mutex_unlock(&thread_mutex);
}
module_init(trace_event_init);
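foo_bar_reg() and foo_bar_unreg(), added above, are the enable/disable callbacks of a TRACE_EVENT_FN() tracepoint: the extra kthread is started only while someone has the event enabled and is stopped again when it is disabled or the module unloads. A hedged sketch of how such a definition is wired up in trace-events-sample.h (the field layout is illustrative):

  void foo_bar_reg(void);
  void foo_bar_unreg(void);

  /*
   * TRACE_EVENT_FN() is TRACE_EVENT() plus two callbacks: reg runs when
   * the event is enabled, unreg when it is disabled again.
   */
  TRACE_EVENT_FN(foo_bar_with_fn,

	TP_PROTO(const char *foo, int bar),

	TP_ARGS(foo, bar),

	TP_STRUCT__entry(
		__string(foo, foo)
		__field(int, bar)
	),

	TP_fast_assign(
		__assign_str(foo, foo);
		__entry->bar = bar;
	),

	TP_printk("foo %s %d", __get_str(foo), __entry->bar),

	foo_bar_reg, foo_bar_unreg
  );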