author     Linus Torvalds <torvalds@linux-foundation.org>   2019-05-16 02:05:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-05-16 02:05:47 +0300
commit     d2d8b146043ae7e250aef1fb312971f6f479d487 (patch)
tree       22db8758a5aa0bc850ba8f83fe57b1f679924d0a /include/trace
parent     2bbacd1a92788ee334c7e92b765ea16ebab68dfe (diff)
parent     693713cbdb3a4bda5a8a678c31f06560bbb14657 (diff)
download   linux-d2d8b146043ae7e250aef1fb312971f6f479d487.tar.xz
Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"The major changes in this tracing update includes:
- Removal of non-DYNAMIC_FTRACE from 32bit x86
- Removal of mcount support from x86
- Emulating a call from int3 on x86_64, which fixes live kernel patching (the emulation helpers are sketched after the commit list below)
- Consolidated Tracing Error logs file
Minor updates:
- Removal of klp_check_compiler_support()
- kdb ftrace dumping output changes
- Accessing and creating ftrace instances from inside the kernel
- Clean up of #define if macro
- Introduction of TRACE_EVENT_NOP() to disable trace events based on
config options (a usage sketch follows this message)
And other minor fixes and clean ups"
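As a usage sketch of the TRACE_EVENT_NOP() pattern (the same shape the rcu.h and sched.h hunks below adopt): a subsystem aliases the TRACE_EVENT family to either the real macro or the NOP variant based on a config option, so call sites compile and type-check either way but emit nothing when tracing is configured out. CONFIG_FOO_TRACE, TRACE_EVENT_FOO, and foo_event here are hypothetical names, not part of this merge.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

/* Pick the real TRACE_EVENT() or the no-op stub at compile time. */
#ifdef CONFIG_FOO_TRACE
#define TRACE_EVENT_FOO	TRACE_EVENT
#else
#define TRACE_EVENT_FOO	TRACE_EVENT_NOP
#endif

TRACE_EVENT_FOO(foo_event,

	TP_PROTO(const char *name, unsigned long val),

	TP_ARGS(name, val),

	TP_STRUCT__entry(
		__string(name, name)
		__field(unsigned long, val)
	),

	TP_fast_assign(
		__assign_str(name, name);
		__entry->val = val;
	),

	TP_printk("%s val=%lu", __get_str(name), __entry->val)
);

#endif /* _TRACE_FOO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Either way, callers simply invoke trace_foo_event(name, val); with CONFIG_FOO_TRACE unset, TRACE_EVENT_NOP() still produces a static inline stub, so the call site compiles away without any #ifdef at the caller.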
* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
x86: Hide the int3_emulate_call/jmp functions from UML
livepatch: Remove klp_check_compiler_support()
ftrace/x86: Remove mcount support
ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
tracing: Simplify "if" macro code
tracing: Fix documentation about disabling options using trace_options
tracing: Replace kzalloc with kcalloc
tracing: Fix partial reading of trace event's id file
tracing: Allow RCU to run between postponed startup tests
tracing: Fix white space issues in parse_pred() function
tracing: Eliminate const char[] auto variables
ring-buffer: Fix mispelling of Calculate
tracing: probeevent: Fix to make the type of $comm string
tracing: probeevent: Do not accumulate on ret variable
tracing: uprobes: Re-enable $comm support for uprobe events
ftrace/x86_64: Emulate call function while updating in breakpoint handler
x86_64: Allow breakpoints to emulate call instructions
x86_64: Add gap to int3 to allow for call emulation
tracing: kdb: Allow ftdump to skip all but the last few entries
tracing: Add trace_total_entries() / trace_total_entries_cpu()
...
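The three int3-related commits above work together: the x86_64 entry code reserves a gap in the int3 stack frame so the breakpoint handler can "push" onto the interrupted context's stack, which is what lets a breakpoint emulate a call instruction while ftrace rewrites call sites. A sketch of the emulation helpers, paraphrased from the series (details such as their exact placement in asm/text-patching.h are from memory and approximate):

#include <linux/ptrace.h>

#define INT3_INSN_SIZE 1	/* the 0xcc byte that trapped */
#define CALL_INSN_SIZE 5	/* the call insn being emulated */

static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * Relies on the gap the entry code reserves between the
	 * interrupted stack and the saved pt_regs: growing regs->sp
	 * here cannot clobber the saved register state.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* Return address = the byte after the 5-byte call being patched. */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}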
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h |  8
-rw-r--r--  include/trace/events/rcu.h   | 81
-rw-r--r--  include/trace/events/sched.h | 21
3 files changed, 47 insertions, 63 deletions
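The define_trace.h hunk below slots TRACE_EVENT_NOP() into the trace headers' multi-include machinery: the same header body is preprocessed several times with the TRACE_EVENT family redefined for each pass. The technique reduces to a plain X-macro trick; a self-contained sketch in ordinary C (all names here are illustrative, not kernel APIs):

#include <stdio.h>

/* One event list, expanded differently on each pass below. */
#define EVENT_LIST \
	EVENT(sched_wakeup) \
	EVENT(sched_switch) \
	EVENT_NOP(debug_only_event)	/* dropped from every table */

/* Pass 1: generate an enum of event ids. */
#define EVENT(name)	EV_##name,
#define EVENT_NOP(name)			/* expands to nothing */
enum event_id { EVENT_LIST EV_MAX };
#undef EVENT
#undef EVENT_NOP

/* Pass 2: generate a matching name table from the same list. */
#define EVENT(name)	[EV_##name] = #name,
#define EVENT_NOP(name)
static const char *event_names[] = { EVENT_LIST };
#undef EVENT
#undef EVENT_NOP

int main(void)
{
	for (int i = 0; i < EV_MAX; i++)
		printf("%d: %s\n", i, event_names[i]);
	return 0;
}

TRACE_EVENT_NOP()'s empty expansion in define_trace.h plays the role of EVENT_NOP here: during the passes that generate tracepoint registration code, a NOP event simply vanishes from the output.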
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index cb30c5532144..bd75f97867b9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -46,6 +46,12 @@
 		    assign, print, reg, unreg)		\
 	DEFINE_TRACE_FN(name, reg, unreg)
 
+#undef TRACE_EVENT_NOP
+#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
+
+#undef DEFINE_EVENT_NOP
+#define DEFINE_EVENT_NOP(template, name, proto, args)
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
 	DEFINE_TRACE(name)
@@ -102,6 +108,8 @@
 #undef TRACE_EVENT_FN
 #undef TRACE_EVENT_FN_COND
 #undef TRACE_EVENT_CONDITION
+#undef TRACE_EVENT_NOP
+#undef DEFINE_EVENT_NOP
 #undef DECLARE_EVENT_CLASS
 #undef DEFINE_EVENT
 #undef DEFINE_EVENT_FN
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 80339fd14c1c..02a3f78f7cd8 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -7,6 +7,12 @@
 
 #include <linux/tracepoint.h>
 
+#ifdef CONFIG_RCU_TRACE
+#define TRACE_EVENT_RCU	TRACE_EVENT
+#else
+#define TRACE_EVENT_RCU	TRACE_EVENT_NOP
+#endif
+
 /*
  * Tracepoint for start/end markers used for utilization calculations.
  * By convention, the string is of the following forms:
@@ -35,8 +41,6 @@ TRACE_EVENT(rcu_utilization,
 	TP_printk("%s", __entry->s)
 );
 
-#ifdef CONFIG_RCU_TRACE
-
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 
 /*
@@ -62,7 +66,7 @@ TRACE_EVENT(rcu_utilization,
  * "end": End a grace period.
  * "cpuend": CPU first notices a grace-period end.
  */
-TRACE_EVENT(rcu_grace_period,
+TRACE_EVENT_RCU(rcu_grace_period,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
 
@@ -101,7 +105,7 @@ TRACE_EVENT(rcu_grace_period,
  * "Cleanup": Clean up rcu_node structure after previous GP.
  * "CleanupMore": Clean up, and another GP is needed.
  */
-TRACE_EVENT(rcu_future_grace_period,
+TRACE_EVENT_RCU(rcu_future_grace_period,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq,
 		 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
@@ -141,7 +145,7 @@ TRACE_EVENT(rcu_future_grace_period,
  * rcu_node structure, and the mask of CPUs that will be waited for.
  * All but the type of RCU are extracted from the rcu_node structure.
  */
-TRACE_EVENT(rcu_grace_period_init,
+TRACE_EVENT_RCU(rcu_grace_period_init,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
 		 int grplo, int grphi, unsigned long qsmask),
@@ -186,7 +190,7 @@ TRACE_EVENT(rcu_grace_period_init,
  * "endwake": Woke piggybackers up.
  * "done": Someone else did the expedited grace period for us.
  */
-TRACE_EVENT(rcu_exp_grace_period,
+TRACE_EVENT_RCU(rcu_exp_grace_period,
 
 	TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
 
@@ -218,7 +222,7 @@ TRACE_EVENT(rcu_exp_grace_period,
  * "nxtlvl": Advance to next level of rcu_node funnel
  * "wait": Wait for someone else to do expedited GP
  */
-TRACE_EVENT(rcu_exp_funnel_lock,
+TRACE_EVENT_RCU(rcu_exp_funnel_lock,
 
 	TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
 		 const char *gpevent),
@@ -269,7 +273,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
  * "WaitQueue": Enqueue partially done, timed wait for it to complete.
  * "WokeQueue": Partial enqueue now complete.
  */
-TRACE_EVENT(rcu_nocb_wake,
+TRACE_EVENT_RCU(rcu_nocb_wake,
 
 	TP_PROTO(const char *rcuname, int cpu, const char *reason),
 
@@ -297,7 +301,7 @@ TRACE_EVENT(rcu_nocb_wake,
  * include SRCU), the grace-period number that the task is blocking
  * (the current or the next), and the task's PID.
  */
-TRACE_EVENT(rcu_preempt_task,
+TRACE_EVENT_RCU(rcu_preempt_task,
 
 	TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
 
@@ -324,7 +328,7 @@ TRACE_EVENT(rcu_preempt_task,
  * read-side critical section exiting that critical section. Track the
  * type of RCU (which one day might include SRCU) and the task's PID.
  */
-TRACE_EVENT(rcu_unlock_preempted_task,
+TRACE_EVENT_RCU(rcu_unlock_preempted_task,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
 
@@ -353,7 +357,7 @@ TRACE_EVENT(rcu_unlock_preempted_task,
  * whether there are any blocked tasks blocking the current grace period.
  * All but the type of RCU are extracted from the rcu_node structure.
  */
-TRACE_EVENT(rcu_quiescent_state_report,
+TRACE_EVENT_RCU(rcu_quiescent_state_report,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq,
 		 unsigned long mask, unsigned long qsmask,
@@ -396,7 +400,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
  * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
  * a CPU that has been in dyntick-idle mode for too long.
  */
-TRACE_EVENT(rcu_fqs,
+TRACE_EVENT_RCU(rcu_fqs,
 
 	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
 
@@ -436,7 +440,7 @@ TRACE_EVENT(rcu_fqs,
  * events use two separate counters, and that the "++=" and "--=" events
  * for irq/NMI will change the counter by two, otherwise by one.
  */
-TRACE_EVENT(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_dyntick,
 
 	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
 
@@ -468,7 +472,7 @@ TRACE_EVENT(rcu_dyntick,
  * number of lazy callbacks queued, and the fourth element is the
  * total number of callbacks queued.
  */
-TRACE_EVENT(rcu_callback,
+TRACE_EVENT_RCU(rcu_callback,
 
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
 		 long qlen),
@@ -504,7 +508,7 @@ TRACE_EVENT(rcu_callback,
  * the fourth argument is the number of lazy callbacks queued, and the
  * fifth argument is the total number of callbacks queued.
  */
-TRACE_EVENT(rcu_kfree_callback,
+TRACE_EVENT_RCU(rcu_kfree_callback,
 
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
 		 long qlen_lazy, long qlen),
@@ -539,7 +543,7 @@ TRACE_EVENT(rcu_kfree_callback,
  * the total number of callbacks queued, and the fourth argument is
  * the current RCU-callback batch limit.
  */
-TRACE_EVENT(rcu_batch_start,
+TRACE_EVENT_RCU(rcu_batch_start,
 
 	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
 
@@ -569,7 +573,7 @@ TRACE_EVENT(rcu_batch_start,
  * The first argument is the type of RCU, and the second argument is
  * a pointer to the RCU callback itself.
  */
-TRACE_EVENT(rcu_invoke_callback,
+TRACE_EVENT_RCU(rcu_invoke_callback,
 
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp),
 
@@ -598,7 +602,7 @@ TRACE_EVENT(rcu_invoke_callback,
  * is the offset of the callback within the enclosing RCU-protected
  * data structure.
  */
-TRACE_EVENT(rcu_invoke_kfree_callback,
+TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
 
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
 
@@ -631,7 +635,7 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
  * and the sixth argument (risk) is the return value from
  * rcu_is_callbacks_kthread().
  */
-TRACE_EVENT(rcu_batch_end,
+TRACE_EVENT_RCU(rcu_batch_end,
 
 	TP_PROTO(const char *rcuname, int callbacks_invoked,
 		 char cb, char nr, char iit, char risk),
 
@@ -673,7 +677,7 @@ TRACE_EVENT(rcu_batch_end,
  * callback address can be NULL.
  */
 #define RCUTORTURENAME_LEN 8
-TRACE_EVENT(rcu_torture_read,
+TRACE_EVENT_RCU(rcu_torture_read,
 
 	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
 		 unsigned long secs, unsigned long c_old, unsigned long c),
 
@@ -721,7 +725,7 @@ TRACE_EVENT(rcu_torture_read,
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
  * is the count of remaining callbacks, and "done" is the piggybacking count.
  */
-TRACE_EVENT(rcu_barrier,
+TRACE_EVENT_RCU(rcu_barrier,
 
 	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt,
 		 unsigned long done),
 
@@ -748,41 +752,6 @@ TRACE_EVENT(rcu_barrier,
 		  __entry->done)
 );
 
-#else /* #ifdef CONFIG_RCU_TRACE */
-
-#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
-				       level, grplo, grphi, event) \
-	do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
-				    qsmask) do { } while (0)
-#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
-	do { } while (0)
-#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
-	do { } while (0)
-#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
-					 grplo, grphi, gp_tasks) do { } \
-	while (0)
-#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
-	do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
-	do { } while (0)
-#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
-#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
-	do { } while (0)
-#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
-#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
-
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
 #endif /* _TRACE_RCU_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9a4bdfadab07..c8c7c7efb487 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -241,7 +241,6 @@ DECLARE_EVENT_CLASS(sched_process_template,
 DEFINE_EVENT(sched_process_template, sched_process_free,
 	     TP_PROTO(struct task_struct *p),
 	     TP_ARGS(p));
-
 
 /*
  * Tracepoint for a task exiting:
@@ -336,11 +335,20 @@ TRACE_EVENT(sched_process_exec,
 		  __entry->pid, __entry->old_pid)
 );
 
+
+#ifdef CONFIG_SCHEDSTATS
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
+#else
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
+#endif
+
 /*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
-DECLARE_EVENT_CLASS(sched_stat_template,
+DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
 
 	TP_PROTO(struct task_struct *tsk, u64 delay),
 
@@ -363,12 +371,11 @@ DECLARE_EVENT_CLASS(sched_stat_template,
 		  (unsigned long long)__entry->delay)
 );
 
-
 /*
  * Tracepoint for accounting wait time (time the task is runnable
  * but not actually running due to scheduler contention).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
 
@@ -376,7 +383,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_wait,
  * Tracepoint for accounting sleep time (time the task is not runnable,
  * including iowait, see below).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
 
@@ -384,14 +391,14 @@ DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
  * Tracepoint for accounting iowait time (time the task is not runnable
  * due to waiting on IO to complete).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
 
 /*
  * Tracepoint for accounting blocked time (time the task is in uninterruptible).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
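The sched.h hunk above shows the class-based variant: an entire DECLARE_EVENT_CLASS() and all of its DEFINE_EVENT()s are gated on one config option via DECLARE_EVENT_CLASS_NOP()/DEFINE_EVENT_NOP(). A sketch of the same shape for a hypothetical subsystem (CONFIG_BAR_STATS and the bar_* names are invented for illustration):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM bar

#if !defined(_TRACE_BAR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BAR_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/* Gate the whole class and its events on one config option. */
#ifdef CONFIG_BAR_STATS
#define DEFINE_EVENT_BARSTAT		DEFINE_EVENT
#define DECLARE_EVENT_CLASS_BARSTAT	DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_BARSTAT		DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_BARSTAT	DECLARE_EVENT_CLASS_NOP
#endif

DECLARE_EVENT_CLASS_BARSTAT(bar_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(u64, delay)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->delay = delay;
	),

	TP_printk("comm=%s delay=%llu [ns]",
		  __entry->comm, (unsigned long long)__entry->delay)
);

/* Compiled out together with the class when CONFIG_BAR_STATS=n. */
DEFINE_EVENT_BARSTAT(bar_stat_template, bar_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay));

#endif /* _TRACE_BAR_H */

/* This part must be outside protection */
#include <trace/define_trace.h>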