author     Linus Torvalds <torvalds@linux-foundation.org>    2010-05-28 02:23:47 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-05-28 02:23:47 +0400
commit     c5617b200ac52e35f7e8cf05a17b0a2d50f6b3e9
tree       40d5e99660c77c5791392d349a93113c044dbf14 /include
parent     cad719d86e9dbd06634eaba6401e022c8101d6b2
parent     49c177461bfbedeccbab22bf3905db2f9da7f1c3
download   linux-c5617b200ac52e35f7e8cf05a17b0a2d50f6b3e9.tar.xz
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (61 commits)
tracing: Add __used annotation to event variable
perf, trace: Fix !x86 build bug
perf report: Support multiple events on the TUI
perf annotate: Fix up usage of the build id cache
x86/mmiotrace: Remove redundant instruction prefix checks
perf annotate: Add TUI interface
perf tui: Remove annotate from popup menu after failure
perf report: Don't start the TUI if -D is used
perf: Fix getline undeclared
perf: Optimize perf_tp_event_match()
perf: Remove more code from the fastpath
perf: Optimize the !vmalloc backed buffer
perf: Optimize perf_output_copy()
perf: Fix wakeup storm for RO mmap()s
perf-record: Share per-cpu buffers
perf-record: Remove -M
perf: Ensure that IOC_OUTPUT isn't used to create multi-writer buffers
perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events
perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction
perf tui: Allow disabling the TUI on a per command basis in ~/.perfconfig
...
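
The two tracepoint commits above ("perf, trace: Optimize tracepoints ...") rest on a new registration API that carries a per-probe data pointer: every probe now receives the registered pointer as its first argument, and register/unregister take it as an extra parameter (see the include/linux/tracepoint.h hunk below). A minimal sketch of what a caller looks like after this merge, assuming a hypothetical tracepoint "foo" declared elsewhere with DECLARE_TRACE(foo, TP_PROTO(int val), TP_ARGS(val)); the probe and context names are illustrative only, not part of the merged code:

/*
 * Sketch only: "foo", my_foo_probe() and struct foo_ctx are illustrative.
 * The shapes follow the new __DECLARE_TRACE() / tracepoint_probe_register()
 * prototypes introduced by this merge.
 */
#include <linux/init.h>
#include <linux/tracepoint.h>

struct foo_ctx {
	unsigned long hits;
};

static struct foo_ctx my_ctx;

/* probes now receive the data pointer supplied at registration time */
static void my_foo_probe(void *data, int val)
{
	struct foo_ctx *ctx = data;

	ctx->hits++;
}

static int __init foo_hook_init(void)
{
	/* registration and unregistration carry the same data pointer */
	return register_trace_foo(my_foo_probe, &my_ctx);
}

static void __exit foo_hook_exit(void)
{
	unregister_trace_foo(my_foo_probe, &my_ctx);
}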
Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace_event.h  103
-rw-r--r--  include/linux/perf_event.h     28
-rw-r--r--  include/linux/syscalls.h       57
-rw-r--r--  include/linux/tracepoint.h     98
-rw-r--r--  include/trace/ftrace.h        249
-rw-r--r--  include/trace/syscall.h        10

6 files changed, 280 insertions, 265 deletions
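
For orientation before the diff itself: the ftrace event structures are reorganized so that state shared by all events of a class (probe, field definitions, registration hooks) moves into a new struct ftrace_event_class, the print callbacks of struct trace_event move into struct trace_event_functions, and struct ftrace_event_call embeds its struct trace_event and points at its class. Roughly what the reworked DECLARE_EVENT_CLASS()/DEFINE_EVENT() expansion in include/trace/ftrace.h produces for a hypothetical event "foo" in trace system "bar" (a condensed sketch of the macro output; the ftrace_* helpers and print_fmt_foo named here are generated by the same macros, not written by hand):

/* generated per class: shared probe, field list and init hook */
static struct ftrace_event_class __used event_class_foo = {
	.system		= "bar",
	.define_fields	= ftrace_define_fields_foo,
	.fields		= LIST_HEAD_INIT(event_class_foo.fields),
	.raw_init	= trace_event_raw_init,
	.probe		= ftrace_raw_event_foo,
};

/* generated per class: output callbacks, split out of struct trace_event */
static struct trace_event_functions ftrace_event_type_funcs_foo = {
	.trace	= ftrace_raw_output_foo,
};

/* generated per event: only the per-event state remains here */
static struct ftrace_event_call __used
__attribute__((__aligned__(4)))
__attribute__((section("_ftrace_events"))) event_foo = {
	.name		= "foo",
	.class		= &event_class_foo,
	.event.funcs	= &ftrace_event_type_funcs_foo,
	.print_fmt	= print_fmt_foo,
};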
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c082f223e2fe..3167f2df4126 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -73,18 +73,25 @@ struct trace_iterator { }; +struct trace_event; + typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, - int flags); -struct trace_event { - struct hlist_node node; - struct list_head list; - int type; + int flags, struct trace_event *event); + +struct trace_event_functions { trace_print_func trace; trace_print_func raw; trace_print_func hex; trace_print_func binary; }; +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + extern int register_ftrace_event(struct trace_event *event); extern int unregister_ftrace_event(struct trace_event *event); @@ -116,28 +123,70 @@ void tracing_record_cmdline(struct task_struct *tsk); struct event_filter; +enum trace_reg { + TRACE_REG_REGISTER, + TRACE_REG_UNREGISTER, + TRACE_REG_PERF_REGISTER, + TRACE_REG_PERF_UNREGISTER, +}; + +struct ftrace_event_call; + +struct ftrace_event_class { + char *system; + void *probe; +#ifdef CONFIG_PERF_EVENTS + void *perf_probe; +#endif + int (*reg)(struct ftrace_event_call *event, + enum trace_reg type); + int (*define_fields)(struct ftrace_event_call *); + struct list_head *(*get_fields)(struct ftrace_event_call *); + struct list_head fields; + int (*raw_init)(struct ftrace_event_call *); +}; + +enum { + TRACE_EVENT_FL_ENABLED_BIT, + TRACE_EVENT_FL_FILTERED_BIT, +}; + +enum { + TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), + TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), +}; + struct ftrace_event_call { struct list_head list; + struct ftrace_event_class *class; char *name; - char *system; struct dentry *dir; - struct trace_event *event; - int enabled; - int (*regfunc)(struct ftrace_event_call *); - void (*unregfunc)(struct ftrace_event_call *); - int id; + struct trace_event event; const char *print_fmt; - int (*raw_init)(struct ftrace_event_call *); - int (*define_fields)(struct ftrace_event_call *); - struct list_head fields; - int filter_active; struct event_filter *filter; void *mod; void *data; + /* + * 32 bit flags: + * bit 1: enabled + * bit 2: filter_active + * + * Changes to flags must hold the event_mutex. + * + * Note: Reads of flags do not hold the event_mutex since + * they occur in critical sections. But the way flags + * is currently used, these changes do no affect the code + * except that when a change is made, it may have a slight + * delay in propagating the changes to other CPUs due to + * caching and such. 
+ */ + unsigned int flags; + +#ifdef CONFIG_PERF_EVENTS int perf_refcount; - int (*perf_event_enable)(struct ftrace_event_call *); - void (*perf_event_disable)(struct ftrace_event_call *); + struct hlist_head *perf_events; +#endif }; #define PERF_MAX_TRACE_SIZE 2048 @@ -194,24 +243,22 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int perf_trace_enable(int event_id); -extern void perf_trace_disable(int event_id); -extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, +extern int perf_trace_init(struct perf_event *event); +extern void perf_trace_destroy(struct perf_event *event); +extern int perf_trace_enable(struct perf_event *event); +extern void perf_trace_disable(struct perf_event *event); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void * -perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, - unsigned long *irq_flags); +extern void *perf_trace_buf_prepare(int size, unsigned short type, + struct pt_regs *regs, int *rctxp); static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags, struct pt_regs *regs) + u64 count, struct pt_regs *regs, void *head) { - struct trace_entry *entry = raw_data; - - perf_tp_event(entry->type, addr, count, raw_data, size, regs); + perf_tp_event(addr, count, raw_data, size, regs, head); perf_swevent_put_recursion_context(rctx); - local_irq_restore(irq_flags); } #endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3fd5c82e0e18..fb6c91eac7e3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -485,6 +485,7 @@ struct perf_guest_info_callbacks { #include <linux/ftrace.h> #include <linux/cpu.h> #include <asm/atomic.h> +#include <asm/local.h> #define PERF_MAX_STACK_DEPTH 255 @@ -587,21 +588,19 @@ struct perf_mmap_data { struct rcu_head rcu_head; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; + int page_order; /* allocation order */ #endif - int data_order; int nr_pages; /* nr of data pages */ int writable; /* are we writable */ int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ - atomic_t events; /* event_id limit */ - atomic_long_t head; /* write position */ - atomic_long_t done_head; /* completed head */ - - atomic_t lock; /* concurrent writes */ - atomic_t wakeup; /* needs a wakeup */ - atomic_t lost; /* nr records lost */ + local_t head; /* write position */ + local_t nest; /* nested writers */ + local_t events; /* event limit */ + local_t wakeup; /* wakeup stamp */ + local_t lost; /* nr records lost */ long watermark; /* wakeup watermark */ @@ -728,6 +727,7 @@ struct perf_event { perf_overflow_handler_t overflow_handler; #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *tp_event; struct event_filter *filter; #endif @@ -803,11 +803,12 @@ struct perf_cpu_context { struct perf_output_handle { struct perf_event *event; struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; + unsigned long wakeup; + unsigned long size; + void *addr; + int page; int nmi; int sample; - int locked; }; #ifdef CONFIG_PERF_EVENTS @@ -993,8 +994,9 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs); +extern void perf_tp_event(u64 addr, u64 count, void *record, + int 
entry_size, struct pt_regs *regs, + struct hlist_head *head); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 057929b0a651..a1a86a53bc73 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -103,22 +103,6 @@ struct perf_event_attr; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) -#ifdef CONFIG_PERF_EVENTS - -#define TRACE_SYS_ENTER_PERF_INIT(sname) \ - .perf_event_enable = perf_sysenter_enable, \ - .perf_event_disable = perf_sysenter_disable, - -#define TRACE_SYS_EXIT_PERF_INIT(sname) \ - .perf_event_enable = perf_sysexit_enable, \ - .perf_event_disable = perf_sysexit_disable, -#else -#define TRACE_SYS_ENTER_PERF(sname) -#define TRACE_SYS_ENTER_PERF_INIT(sname) -#define TRACE_SYS_EXIT_PERF(sname) -#define TRACE_SYS_EXIT_PERF_INIT(sname) -#endif /* CONFIG_PERF_EVENTS */ - #ifdef CONFIG_FTRACE_SYSCALLS #define __SC_STR_ADECL1(t, a) #a #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) @@ -134,54 +118,43 @@ struct perf_event_attr; #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) +extern struct ftrace_event_class event_class_syscall_enter; +extern struct ftrace_event_class event_class_syscall_exit; +extern struct trace_event_functions enter_syscall_print_funcs; +extern struct trace_event_functions exit_syscall_print_funcs; + #define SYSCALL_TRACE_ENTER_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_enter_##sname; \ - static struct trace_event enter_syscall_print_##sname = { \ - .trace = print_syscall_enter, \ - }; \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ event_enter_##sname = { \ .name = "sys_enter"#sname, \ - .system = "syscalls", \ - .event = &enter_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ - .define_fields = syscall_enter_define_fields, \ - .regfunc = reg_event_syscall_enter, \ - .unregfunc = unreg_event_syscall_enter, \ + .class = &event_class_syscall_enter, \ + .event.funcs = &enter_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_exit_##sname; \ - static struct trace_event exit_syscall_print_##sname = { \ - .trace = print_syscall_exit, \ - }; \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ event_exit_##sname = { \ .name = "sys_exit"#sname, \ - .system = "syscalls", \ - .event = &exit_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ - .define_fields = syscall_exit_define_fields, \ - .regfunc = reg_event_syscall_exit, \ - .unregfunc = unreg_event_syscall_exit, \ + .class = &event_class_syscall_exit, \ + .event.funcs = &exit_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ SYSCALL_TRACE_ENTER_EVENT(sname); \ SYSCALL_TRACE_EXIT_EVENT(sname); \ - 
static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta_##sname = { \ @@ -191,12 +164,14 @@ struct perf_event_attr; .args = args_##sname, \ .enter_event = &event_enter_##sname, \ .exit_event = &event_exit_##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ + .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \ }; #define SYSCALL_DEFINE0(sname) \ SYSCALL_TRACE_ENTER_EVENT(_##sname); \ SYSCALL_TRACE_EXIT_EVENT(_##sname); \ - static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta__##sname = { \ @@ -204,6 +179,8 @@ struct perf_event_attr; .nb_args = 0, \ .enter_event = &event_enter__##sname, \ .exit_event = &event_exit__##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ + .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \ }; \ asmlinkage long sys_##sname(void) #else diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 1d85f9a6a199..9a59d1f98cd4 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -20,12 +20,17 @@ struct module; struct tracepoint; +struct tracepoint_func { + void *func; + void *data; +}; + struct tracepoint { const char *name; /* Tracepoint name */ int state; /* State. */ void (*regfunc)(void); void (*unregfunc)(void); - void **funcs; + struct tracepoint_func *funcs; } __attribute__((aligned(32))); /* * Aligned on 32 bytes because it is * globally visible and gcc happily @@ -37,16 +42,19 @@ struct tracepoint { * Connect a probe to a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_register(const char *name, void *probe); +extern int tracepoint_probe_register(const char *name, void *probe, void *data); /* * Disconnect a probe from a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_unregister(const char *name, void *probe); +extern int +tracepoint_probe_unregister(const char *name, void *probe, void *data); -extern int tracepoint_probe_register_noupdate(const char *name, void *probe); -extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); +extern int tracepoint_probe_register_noupdate(const char *name, void *probe, + void *data); +extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, + void *data); extern void tracepoint_probe_update_all(void); struct tracepoint_iter { @@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. + * + * Note, the proto and args passed in includes "__data" as the first parameter. + * The reason for this is to handle the "void" prototype. If a tracepoint + * has a "void" prototype, then it is invalid to declare a function + * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just + * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". 
*/ #define __DO_TRACE(tp, proto, args) \ do { \ - void **it_func; \ + struct tracepoint_func *it_func_ptr; \ + void *it_func; \ + void *__data; \ \ rcu_read_lock_sched_notrace(); \ - it_func = rcu_dereference_sched((tp)->funcs); \ - if (it_func) { \ + it_func_ptr = rcu_dereference_sched((tp)->funcs); \ + if (it_func_ptr) { \ do { \ - ((void(*)(proto))(*it_func))(args); \ - } while (*(++it_func)); \ + it_func = (it_func_ptr)->func; \ + __data = (it_func_ptr)->data; \ + ((void(*)(proto))(it_func))(args); \ + } while ((++it_func_ptr)->func); \ } \ rcu_read_unlock_sched_notrace(); \ } while (0) @@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. */ -#define DECLARE_TRACE(name, proto, args) \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ - TP_PROTO(proto), TP_ARGS(args)); \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args)); \ + } \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), void *data) \ + { \ + return tracepoint_probe_register(#name, (void *)probe, \ + data); \ } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ - return tracepoint_probe_register(#name, (void *)probe); \ + return tracepoint_probe_unregister(#name, (void *)probe, \ + data); \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline void \ + check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ - return tracepoint_probe_unregister(#name, (void *)probe);\ } - #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ __attribute__((section("__tracepoints_strings"))) = #name; \ @@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, EXPORT_SYMBOL(__tracepoint_##name) #else /* !CONFIG_TRACEPOINTS */ -#define DECLARE_TRACE(name, proto, args) \ - static inline void _do_trace_##name(struct tracepoint *tp, proto) \ - { } \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ + } \ + static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ + { \ } #define DEFINE_TRACE_FN(name, reg, unreg) @@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, #define EXPORT_TRACEPOINT_SYMBOL(name) #endif /* CONFIG_TRACEPOINTS */ + +/* + * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype + * (void). "void" is a special value in a function prototype and can + * not be combined with other arguments. Since the DECLARE_TRACE() + * macro adds a data element at the beginning of the prototype, + * we need a way to differentiate "(void *data, proto)" from + * "(void *data, void)". The second prototype is invalid. 
+ * + * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype + * and "void *__data" as the callback prototype. + * + * DECLARE_TRACE() passes "proto" as the tracepoint protoype and + * "void *__data, proto" as the callback prototype. + */ +#define DECLARE_TRACE_NOARGS(name) \ + __DECLARE_TRACE(name, void, , void *__data, __data) + +#define DECLARE_TRACE(name, proto, args) \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + #endif /* DECLARE_TRACE */ #ifndef TRACE_EVENT diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 88c59c13ea7b..3d685d1f2a03 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -62,10 +62,13 @@ struct trace_entry ent; \ tstruct \ char __data[0]; \ - }; + }; \ + \ + static struct ftrace_event_class event_class_##name; + #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ - static struct ftrace_event_call \ + static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) event_##name #undef DEFINE_EVENT_PRINT @@ -147,7 +150,7 @@ * * entry = iter->ent; * - * if (entry->type != event_<call>.id) { + * if (entry->type != event_<call>->event.type) { * WARN_ON_ONCE(1); * return TRACE_TYPE_UNHANDLED; * } @@ -206,18 +209,22 @@ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace enum print_line_t \ -ftrace_raw_output_id_##call(int event_id, const char *name, \ - struct trace_iterator *iter, int flags) \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *trace_event) \ { \ + struct ftrace_event_call *event; \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##call *field; \ struct trace_entry *entry; \ struct trace_seq *p; \ int ret; \ \ + event = container_of(trace_event, struct ftrace_event_call, \ + event); \ + \ entry = iter->ent; \ \ - if (entry->type != event_id) { \ + if (entry->type != event->event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ @@ -226,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \ \ p = &get_cpu_var(ftrace_event_seq); \ trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", name); \ + ret = trace_seq_printf(s, "%s: ", event->name); \ if (ret) \ ret = trace_seq_printf(s, print); \ put_cpu(); \ @@ -234,21 +241,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ -} - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ -static notrace enum print_line_t \ -ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \ -{ \ - return ftrace_raw_output_id_##template(event_##name.id, \ - #name, iter, flags); \ -} +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ static notrace enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *event) \ { \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##template *field; \ @@ -258,7 +260,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ \ entry = iter->ent; \ \ - if (entry->type != event_##call.id) { \ + if (entry->type != event_##call.event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ @@ -275,7 +277,10 @@ 
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ -} +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -381,80 +386,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#ifdef CONFIG_PERF_EVENTS - -/* - * Generate the functions needed for tracepoint perf_event support. - * - * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later - * - * static int ftrace_profile_enable_<call>(void) - * { - * return register_trace_<call>(ftrace_profile_<call>); - * } - * - * static void ftrace_profile_disable_<call>(void) - * { - * unregister_trace_<call>(ftrace_profile_<call>); - * } - * - */ - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ - \ -static void perf_trace_##name(proto); \ - \ -static notrace int \ -perf_trace_enable_##name(struct ftrace_event_call *unused) \ -{ \ - return register_trace_##name(perf_trace_##name); \ -} \ - \ -static notrace void \ -perf_trace_disable_##name(struct ftrace_event_call *unused) \ -{ \ - unregister_trace_##name(perf_trace_##name); \ -} - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - -#endif /* CONFIG_PERF_EVENTS */ - /* * Stage 4 of the trace events. * * Override the macros in <trace/trace_events.h> to include the following: * - * static void ftrace_event_<call>(proto) - * { - * event_trace_printk(_RET_IP_, "<call>: " <fmt>); - * } - * - * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused) - * { - * return register_trace_<call>(ftrace_event_<call>); - * } - * - * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) - * { - * unregister_trace_<call>(ftrace_event_<call>); - * } - * - * * For those macros defined with TRACE_EVENT: * * static struct ftrace_event_call event_<call>; * - * static void ftrace_raw_event_<call>(proto) + * static void ftrace_raw_event_<call>(void *__data, proto) * { + * struct ftrace_event_call *event_call = __data; * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; * struct ring_buffer_event *event; * struct ftrace_raw_<call> *entry; <-- defined in stage 1 @@ -469,7 +412,7 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); * * event = trace_current_buffer_lock_reserve(&buffer, - * event_<call>.id, + * event_<call>->event.type, * sizeof(*entry) + __data_size, * irq_flags, pc); * if (!event) @@ -484,43 +427,42 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * event, irq_flags, pc); * } * - * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) - * { - * return register_trace_<call>(ftrace_raw_event_<call>); - * } - * - * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) - * { - * unregister_trace_<call>(ftrace_raw_event_<call>); - * } - * * static struct trace_event ftrace_event_type_<call> = { * .trace = ftrace_raw_output_<call>, <-- stage 2 * }; * * static const char print_fmt_<call>[] = <TP_printk>; * + * static struct ftrace_event_class __used event_class_<template> = { + * .system = "<system>", + * 
.define_fields = ftrace_define_fields_<call>, + * .fields = LIST_HEAD_INIT(event_class_##call.fields), + * .raw_init = trace_event_raw_init, + * .probe = ftrace_raw_event_##call, + * }; + * * static struct ftrace_event_call __used * __attribute__((__aligned__(4))) * __attribute__((section("_ftrace_events"))) event_<call> = { * .name = "<call>", - * .system = "<system>", - * .raw_init = trace_event_raw_init, - * .regfunc = ftrace_reg_event_<call>, - * .unregfunc = ftrace_unreg_event_<call>, + * .class = event_class_<template>, + * .event = &ftrace_event_type_<call>, * .print_fmt = print_fmt_<call>, - * .define_fields = ftrace_define_fields_<call>, - * } + * }; * */ #ifdef CONFIG_PERF_EVENTS +#define _TRACE_PERF_PROTO(call, proto) \ + static notrace void \ + perf_trace_##call(void *__data, proto); + #define _TRACE_PERF_INIT(call) \ - .perf_event_enable = perf_trace_enable_##call, \ - .perf_event_disable = perf_trace_disable_##call, + .perf_probe = perf_trace_##call, #else +#define _TRACE_PERF_PROTO(call, proto) #define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ @@ -554,9 +496,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ \ static notrace void \ -ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ - proto) \ +ftrace_raw_event_##call(void *__data, proto) \ { \ + struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ring_buffer_event *event; \ struct ftrace_raw_##call *entry; \ @@ -571,7 +513,7 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ \ event = trace_current_buffer_lock_reserve(&buffer, \ - event_call->id, \ + event_call->event.type, \ sizeof(*entry) + __data_size, \ irq_flags, pc); \ if (!event) \ @@ -586,34 +528,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ trace_nowake_buffer_unlock_commit(buffer, \ event, irq_flags, pc); \ } +/* + * The ftrace_test_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless it too is updated. 
+ */ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ - \ -static notrace void ftrace_raw_event_##call(proto) \ -{ \ - ftrace_raw_event_id_##template(&event_##call, args); \ -} \ - \ -static notrace int \ -ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \ +static inline void ftrace_test_probe_##call(void) \ { \ - return register_trace_##call(ftrace_raw_event_##call); \ -} \ - \ -static notrace void \ -ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \ -{ \ - unregister_trace_##call(ftrace_raw_event_##call); \ -} \ - \ -static struct trace_event ftrace_event_type_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; + check_trace_callback_type_##call(ftrace_raw_event_##template); \ +} #undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -630,7 +559,16 @@ static struct trace_event ftrace_event_type_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ -static const char print_fmt_##call[] = print; +_TRACE_PERF_PROTO(call, PARAMS(proto)); \ +static const char print_fmt_##call[] = print; \ +static struct ftrace_event_class __used event_class_##call = { \ + .system = __stringify(TRACE_SYSTEM), \ + .define_fields = ftrace_define_fields_##call, \ + .fields = LIST_HEAD_INIT(event_class_##call.fields),\ + .raw_init = trace_event_raw_init, \ + .probe = ftrace_raw_event_##call, \ + _TRACE_PERF_INIT(call) \ +}; #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ @@ -639,15 +577,10 @@ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ - .system = __stringify(TRACE_SYSTEM), \ - .event = &ftrace_event_type_##call, \ - .raw_init = trace_event_raw_init, \ - .regfunc = ftrace_raw_reg_event_##call, \ - .unregfunc = ftrace_raw_unreg_event_##call, \ + .class = &event_class_##template, \ + .event.funcs = &ftrace_event_type_funcs_##template, \ .print_fmt = print_fmt_##template, \ - .define_fields = ftrace_define_fields_##template, \ - _TRACE_PERF_INIT(call) \ -} +}; #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ @@ -658,14 +591,9 @@ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ - .system = __stringify(TRACE_SYSTEM), \ - .event = &ftrace_event_type_##call, \ - .raw_init = trace_event_raw_init, \ - .regfunc = ftrace_raw_reg_event_##call, \ - .unregfunc = ftrace_raw_unreg_event_##call, \ + .class = &event_class_##template, \ + .event.funcs = &ftrace_event_type_funcs_##call, \ .print_fmt = print_fmt_##call, \ - .define_fields = ftrace_define_fields_##template, \ - _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -765,17 +693,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -perf_trace_templ_##call(struct ftrace_event_call *event_call, \ - struct pt_regs *__regs, proto) \ +perf_trace_##call(void *__data, proto) \ { \ + struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_raw_##call *entry; \ + 
struct pt_regs __regs; \ u64 __addr = 0, __count = 1; \ - unsigned long irq_flags; \ + struct hlist_head *head; \ int __entry_size; \ int __data_size; \ int rctx; \ \ + perf_fetch_caller_regs(&__regs, 1); \ + \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ sizeof(u64)); \ @@ -784,32 +715,34 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ + \ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ - __entry_size, event_call->id, &rctx, &irq_flags); \ + __entry_size, event_call->event.type, &__regs, &rctx); \ if (!entry) \ return; \ + \ tstruct \ \ { assign; } \ \ + head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags, __regs); \ + __count, &__regs, head); \ } +/* + * This part is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the + * perf probe will fail to compile unless it too is updated. + */ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void perf_trace_##call(proto) \ +static inline void perf_test_probe_##call(void) \ { \ - struct ftrace_event_call *event_call = &event_##call; \ - struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \ - \ - perf_fetch_caller_regs(__regs, 1); \ - \ - perf_trace_templ_##template(event_call, __regs, args); \ - \ - put_cpu_var(perf_trace_regs); \ + check_trace_callback_type_##call(perf_trace_##template); \ } + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) diff --git a/include/trace/syscall.h b/include/trace/syscall.h index e5e5f48dbfb3..257e08960d7b 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -25,6 +25,8 @@ struct syscall_metadata { int nb_args; const char **types; const char **args; + struct list_head enter_fields; + struct list_head exit_fields; struct ftrace_event_call *enter_event; struct ftrace_event_call *exit_event; @@ -34,16 +36,16 @@ struct syscall_metadata { extern unsigned long arch_syscall_addr(int nr); extern int init_syscall_trace(struct ftrace_event_call *call); -extern int syscall_enter_define_fields(struct ftrace_event_call *call); -extern int syscall_exit_define_fields(struct ftrace_event_call *call); extern int reg_event_syscall_enter(struct ftrace_event_call *call); extern void unreg_event_syscall_enter(struct ftrace_event_call *call); extern int reg_event_syscall_exit(struct ftrace_event_call *call); extern void unreg_event_syscall_exit(struct ftrace_event_call *call); extern int ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); -enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); -enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); +enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags, + struct trace_event *event); +enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags, + struct trace_event *event); #endif #ifdef CONFIG_PERF_EVENTS |
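
Taken together, the include/linux/ftrace_event.h and include/trace/ftrace.h changes above give the perf/tracepoint fast path its new shape: the generated perf probe receives the ftrace_event_call through the tracepoint data pointer, snapshots pt_regs itself, reserves a buffer with perf_trace_buf_prepare(), and submits it against the event's per-CPU hlist instead of looking events up by id with IRQs disabled. A condensed sketch of the generated perf_trace_<call>() body for the same hypothetical "foo" event (field assignment abbreviated; struct ftrace_raw_foo is the stage-1 record the macros would define):

static notrace void perf_trace_foo(void *__data, int val)
{
	struct ftrace_event_call *event_call = __data;
	struct ftrace_raw_foo *entry;
	struct pt_regs __regs;
	struct hlist_head *head;
	int entry_size;
	int rctx;

	/* the probe now captures the registers itself */
	perf_fetch_caller_regs(&__regs, 1);

	entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64));
	entry_size -= sizeof(u32);

	entry = perf_trace_buf_prepare(entry_size, event_call->event.type,
				       &__regs, &rctx);
	if (!entry)
		return;

	entry->val = val;		/* tstruct/assign expansion goes here */

	/* submit against the per-tracepoint, per-CPU list of perf events */
	head = per_cpu_ptr(event_call->perf_events, smp_processor_id());
	perf_trace_buf_submit(entry, entry_size, rctx, 0, 1, &__regs, head);
}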