author     Linus Torvalds <torvalds@linux-foundation.org>  2020-02-06 10:12:11 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-02-06 10:12:11 +0300
commit     e310396bb8d7db977a0e10ef7b5040e98b89c34c (patch)
tree       461357ada35c637292884f0af1dc9fc41c0203ff /kernel/trace/trace.h
parent     c1ef57a3a3f5e69e98baf89055b423da62791c13 (diff)
parent     a00574036c261421721fa770ccd21a1012e1fbbd (diff)
download   linux-e310396bb8d7db977a0e10ef7b5040e98b89c34c.tar.xz
Merge tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
- Added new "bootconfig".
  This looks for a file appended to the initrd to add boot config options,
  and has been discussed thoroughly at Linux Plumbers.
  Very useful for adding kprobes at bootup.
  Only enabled if "bootconfig" is on the real kernel command line
  (see the bootconfig sketch after this list).
- Added dynamic event creation, merging the common code between creating
  synthetic events and kprobe events (see the synthetic event sketch
  after this list).
- Renamed the perf "ring_buffer" structure to "perf_buffer"
- Renamed the ftrace "ring_buffer" structure to "trace_buffer";
  this required renaming the existing "trace_buffer" to "array_buffer"
- Allowed trace_printk() to work within (some) tracing code
- Sorted the tracing configs so they are a little better organized
- Fixed a bug where the ftrace_graph hash was not being protected properly
- Various other small fixes and clean ups
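
As a quick illustration of the bootconfig workflow (a minimal sketch; the
probe target, event name, and initrd path below are hypothetical, and the
key names follow the boot-time tracing support merged in this pull):

    # myboot.bconf - hypothetical fragment adding a kprobe event at boot.
    # Keys live under the "ftrace." namespace used by the boot-time tracer.
    ftrace.event.kprobes.myopen {
            probes = "do_sys_open $arg1 $arg2"
    }

    # Append the fragment to the initrd with the new tools/bootconfig
    # utility, then boot with "bootconfig" on the kernel command line:
    #   bootconfig -a myboot.bconf /boot/initrd.img

And a rough sketch of the new in-kernel synthetic event API from the
dynamic event creation work (names per the documentation added in this
pull; the event, its fields, and the traced values are hypothetical, and
cleanup on the error paths is abbreviated):

    /* Hypothetical module code: create a synthetic event, emit one record. */
    static struct synth_field_desc myevent_fields[] = {
            { .type = "pid_t", .name = "pid" },
            { .type = "u64",   .name = "lat_ns" },
    };

    static int __init myevent_init(void)
    {
            struct trace_event_file *file;
            int ret;

            /* Register "myevent" with the synthetic event subsystem. */
            ret = synth_event_create("myevent", myevent_fields,
                                     ARRAY_SIZE(myevent_fields), THIS_MODULE);
            if (ret)
                    return ret;

            /* Get (and hold a reference on) the event file. */
            file = trace_get_event_file(NULL, "synthetic", "myevent");
            if (IS_ERR(file))
                    return PTR_ERR(file);

            /* Enable the event, or nothing will be recorded. */
            ret = trace_array_set_clr_event(file->tr, "synthetic",
                                            "myevent", true);
            if (ret)
                    goto put;

            /* Emit one trace record with a value for each field. */
            synth_event_trace(file, 2, (u64)current->pid, (u64)1000);
    put:
            trace_put_event_file(file);
            return ret;
    }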
* tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (88 commits)
bootconfig: Show the number of nodes on boot message
tools/bootconfig: Show the number of bootconfig nodes
bootconfig: Add more parse error messages
bootconfig: Use bootconfig instead of boot config
ftrace: Protect ftrace_graph_hash with ftrace_sync
ftrace: Add comment to why rcu_dereference_sched() is open coded
tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
tracing: Annotate ftrace_graph_hash pointer with __rcu
bootconfig: Only load bootconfig if "bootconfig" is on the kernel cmdline
tracing: Use seq_buf for building dynevent_cmd string
tracing: Remove useless code in dynevent_arg_pair_add()
tracing: Remove check_arg() callbacks from dynevent args
tracing: Consolidate some synth_event_trace code
tracing: Fix now invalid var_ref_vals assumption in trace action
tracing: Change trace_boot to use synth_event interface
tracing: Move tracing selftests to bottom of menu
tracing: Move mmio tracer config up with the other tracers
tracing: Move tracing test module configs together
tracing: Move all function tracing configs together
tracing: Documentation for in-kernel synthetic event API
...
Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--  kernel/trace/trace.h  |  98
1 file changed, 74 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a98dce1b3334..99372dd7d168 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -93,6 +93,18 @@ enum trace_type {
 
 #include "trace_entries.h"
 
+/* Use this for memory failure errors */
+#define MEM_FAIL(condition, fmt, ...) ({			\
+	static bool __section(.data.once) __warned;		\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !__warned)) {		\
+		__warned = true;				\
+		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
+
 /*
  * syscalls are special, and need special handling, this is why
  * they are not included in trace_entries.h
@@ -175,9 +187,9 @@ struct trace_array_cpu {
 struct tracer;
 struct trace_option_dentry;
 
-struct trace_buffer {
+struct array_buffer {
 	struct trace_array *tr;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct trace_array_cpu __percpu *data;
 	u64 time_start;
 	int cpu;
@@ -248,7 +260,7 @@ struct cond_snapshot {
 struct trace_array {
 	struct list_head list;
 	char *name;
-	struct trace_buffer trace_buffer;
+	struct array_buffer array_buffer;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	/*
 	 * The max_buffer is used to snapshot the trace when a maximum
@@ -256,12 +268,12 @@ struct trace_array {
 	 * Some tracers will use this to store a maximum trace while
 	 * it continues examining live traces.
 	 *
-	 * The buffers for the max_buffer are set up the same as the trace_buffer
+	 * The buffers for the max_buffer are set up the same as the array_buffer
 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
-	 * with the buffer of the trace_buffer and the buffers are reset for
-	 * the trace_buffer so the tracing can continue.
+	 * with the buffer of the array_buffer and the buffers are reset for
+	 * the array_buffer so the tracing can continue.
 	 */
-	struct trace_buffer max_buffer;
+	struct array_buffer max_buffer;
 	bool allocated_snapshot;
 #endif
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -345,6 +357,8 @@ extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
 extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -684,7 +698,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -704,7 +718,7 @@ struct dentry *tracing_init_dentry(void);
 struct ring_buffer_event;
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
@@ -716,7 +730,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);
 
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
@@ -872,7 +886,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
 trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
 void trace_printk_seq(struct trace_seq *s);
 enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -949,22 +963,31 @@ extern void __trace_graph_return(struct trace_array *tr,
				 unsigned long flags, int pc);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-extern struct ftrace_hash *ftrace_graph_hash;
-extern struct ftrace_hash *ftrace_graph_notrace_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
 
 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
	unsigned long addr = trace->func;
	int ret = 0;
+	struct ftrace_hash *hash;
 
	preempt_disable_notrace();
 
-	if (ftrace_hash_empty(ftrace_graph_hash)) {
+	/*
+	 * Have to open code "rcu_dereference_sched()" because the
+	 * function graph tracer can be called when RCU is not
+	 * "watching".
+	 * Protected with schedule_on_each_cpu(ftrace_sync)
+	 */
+	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+
+	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}
 
-	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+	if (ftrace_lookup_ip(hash, addr)) {
 
		/*
		 * This needs to be cleared on the return functions
@@ -1000,10 +1023,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
	int ret = 0;
+	struct ftrace_hash *notrace_hash;
 
	preempt_disable_notrace();
 
-	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+	/*
+	 * Have to open code "rcu_dereference_sched()" because the
+	 * function graph tracer can be called when RCU is not
+	 * "watching".
+	 * Protected with schedule_on_each_cpu(ftrace_sync)
+	 */
+	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+						 !preemptible());
+
+	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;
 
	preempt_enable_notrace();
@@ -1056,7 +1089,7 @@ struct ftrace_func_command {
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+	return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
@@ -1144,6 +1177,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd);
 void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+			     int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+			      int len, int reset);
 #else
 struct ftrace_func_command;
 
@@ -1366,17 +1404,17 @@ struct trace_subsystem_dir {
 };
 
 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
-					      struct ring_buffer *buffer,
+					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
 {
@@ -1389,7 +1427,7 @@ void trace_buffered_event_disable(void);
 void trace_buffered_event_enable(void);
 
 static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
 {
	if (this_cpu_read(trace_buffered_event) == event) {
@@ -1415,7 +1453,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
  */
 static inline bool
 __event_trigger_test_discard(struct trace_event_file *file,
-			     struct ring_buffer *buffer,
+			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
@@ -1450,7 +1488,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
-			    struct ring_buffer *buffer,
+			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
 {
@@ -1481,7 +1519,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
  */
 static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
-				 struct ring_buffer *buffer,
+				 struct trace_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
@@ -1892,6 +1930,15 @@ void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+					  unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
+			       cpumask_var_t tracing_cpumask_new);
+
+
 #define MAX_EVENT_NAME_LEN	64
 
 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
@@ -1949,6 +1996,9 @@ static inline const char *get_syscall_name(int syscall)
 #ifdef CONFIG_EVENT_TRACING
 void trace_event_init(void);
 void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
 #else
 static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
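
For context on the new MEM_FAIL() macro added at the top of this diff, here
is a minimal sketch of a call site (the surrounding allocation code is a
hypothetical example, not part of this diff):

    /* Hypothetical caller: report a buffer allocation failure once, then bail. */
    buf->buffer = ring_buffer_alloc(size, rb_flags);
    if (MEM_FAIL(!buf->buffer, "Failed to allocate trace buffer\n"))
            return -ENOMEM;

MEM_FAIL() evaluates to the unlikely()-annotated truth value of its
condition, so it can be tested directly in the if(); the static __warned
flag placed in the .data.once section ensures the pr_err() fires only on
the first failure instead of flooding the log.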