author      Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 00:05:08 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2017-11-14 00:05:08 +0300
commit      31486372a1e9a66ec2e9e2903b8792bba7e503e1 (patch)
tree        84f61e0758695e6fbee8d2b7d7b9bc4a5ce6e03b /include/linux
parent      8e9a2dba8686187d8c8179e5b86640e653963889 (diff)
parent      fcdfafcb73be8fa45909327bbddca46fb362a675 (diff)
download    linux-31486372a1e9a66ec2e9e2903b8792bba7e503e1.tar.xz
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"The main changes in this cycle were:
Kernel:
- kprobes updates: use better W^X patterns for code modifications,
improve optprobes, remove jprobes. (Masami Hiramatsu, Kees Cook)
- core fixes: event timekeeping (enabled/running times statistics)
fixes, perf_event_read() locking fixes and cleanups, etc. (Peter
Zijlstra)
- Extend x86 Intel free-running PEBS support and support x86
user-register sampling in perf record and perf script; an example
session is sketched below. (Andi Kleen)
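For illustration, a session exercising the new user-register sampling
might look like this (the workload name and register list are made-up
examples, not taken from the commit):

    # Record user-space register values at each sample
    perf record --user-regs=ip,sp,ax -e cycles:u -- ./my_workload
    # Print the sampled registers via the 'uregs' field
    perf script -F comm,ip,sym,uregs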
Tooling:
- Completely rework the way inline frames are handled. Instead of
querying for the inline nodes on-demand in the individual tools, we
now create proper callchain nodes for inlined frames; see the usage
sketch below. (Milian Wolff)
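A usage sketch, assuming the pre-existing --inline option of 'perf
report' (which this rework now feeds from real callchain nodes rather
than on-demand queries):

    # Record call graphs, then display inlined functions in the chains
    perf record -g -- ./my_workload
    perf report --inline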
- 'perf trace' updates (Arnaldo Carvalho de Melo)
- Implement a way to print formatted output to per-event files in
'perf script' to facilitate generating flame graphs, eliminating the
need to write scripts to do that separation; see the example below
(yuzhoujian, Arnaldo Carvalho de Melo)
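A possible invocation (the exact output file naming is approximate):

    # Record two events, then split the 'perf script' output per event
    perf record -e cycles -e instructions -- ./my_workload
    perf script --per-event-dump
    # Result: one .dump file per event instead of one interleaved stream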
- Update vendor events JSON metrics for Intel's Broadwell, Broadwell
Server, Haswell, Haswell Server, IvyBridge, IvyTown, JakeTown,
Sandy Bridge, Skylake, Skylake Server - and Goldmont Plus V1 (Andi
Kleen, Kan Liang)
- Multithread the synthesizing of PERF_RECORD_ events for
pre-existing threads in 'perf top', speeding up that phase and greatly
improving the user experience on systems such as Intel's Knights
Mill; see the sketch below. (Kan Liang)
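A sketch of the new knob (option spelling per this cycle's 'perf top'
patches; the thread count is arbitrary):

    # Use 8 threads to synthesize PERF_RECORD_ events for existing tasks
    perf top --num-thread-synthesize 8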
- Introduce the concept of weak groups in 'perf stat': try to set up
a group, but if it's not schedulable, fall back to not using a group.
That gives us the best of both worlds: groups if they work, but
still a usable fallback if they don't. An example is sketched
below. (Andi Kleen)
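A minimal sketch of the weak-group modifier (the event selection here
is illustrative; the ':W' suffix marks the group as weak):

    # Count these events as a group if the PMU can co-schedule them,
    # otherwise fall back to counting them individually
    perf stat -e '{branches,branch-misses,cache-references,cache-misses}:W' -a sleep 1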
- perf sched timehist enhancements (David Ahern)
- ... various other enhancements, updates, cleanups and fixes"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (139 commits)
kprobes: Don't spam the build log with deprecation warnings
arm/kprobes: Remove jprobe test case
arm/kprobes: Fix kretprobe test to check correct counter
perf srcline: Show correct function name for srcline of callchains
perf srcline: Fix memory leak in addr2inlines()
perf trace beauty kcmp: Beautify arguments
perf trace beauty: Implement pid_fd beautifier
tools include uapi: Grab a copy of linux/kcmp.h
perf callchain: Fix double mapping al->addr for children without self period
perf stat: Make --per-thread update shadow stats to show metrics
perf stat: Move the shadow stats scale computation in perf_stat__update_shadow_stats
perf tools: Add perf_data_file__write function
perf tools: Add struct perf_data_file
perf tools: Rename struct perf_data_file to perf_data
perf script: Print information about per-event-dump files
perf trace beauty prctl: Generate 'option' string table from kernel headers
tools include uapi: Grab a copy of linux/prctl.h
perf script: Allow creating per-event dump files
perf evsel: Restore evsel->priv as a tool private area
perf script: Use event_format__fprintf()
...
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/kprobes.h     36
-rw-r--r--   include/linux/perf_event.h  32
2 files changed, 25 insertions(+), 43 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd2684700b74..9440a2fc8893 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -391,10 +391,6 @@ int register_kprobes(struct kprobe **kps, int num);
 void unregister_kprobes(struct kprobe **kps, int num);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
 int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
 void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
@@ -443,20 +439,6 @@ static inline void unregister_kprobe(struct kprobe *p)
 static inline void unregister_kprobes(struct kprobe **kps, int num)
 {
 }
-static inline int register_jprobe(struct jprobe *p)
-{
-        return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
-        return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
 static inline void jprobe_return(void)
 {
 }
@@ -486,6 +468,20 @@ static inline int enable_kprobe(struct kprobe *kp)
         return -ENOSYS;
 }
 #endif /* CONFIG_KPROBES */
+static inline int register_jprobe(struct jprobe *p)
+{
+        return -ENOSYS;
+}
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+        return -ENOSYS;
+}
+static inline void unregister_jprobe(struct jprobe *p)
+{
+}
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
         return disable_kprobe(&rp->kp);
@@ -496,11 +492,11 @@ static inline int enable_kretprobe(struct kretprobe *rp)
 }
 static inline int disable_jprobe(struct jprobe *jp)
 {
-        return disable_kprobe(&jp->kp);
+        return -ENOSYS;
 }
 static inline int enable_jprobe(struct jprobe *jp)
 {
-        return enable_kprobe(&jp->kp);
+        return -ENOSYS;
 }
 
 #ifndef CONFIG_KPROBES
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e22f24ded6a..874b71a70058 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -485,9 +485,9 @@ struct perf_addr_filters_head {
 };
 
 /**
- * enum perf_event_active_state - the states of a event
+ * enum perf_event_state - the states of a event
  */
-enum perf_event_active_state {
+enum perf_event_state {
         PERF_EVENT_STATE_DEAD           = -4,
         PERF_EVENT_STATE_EXIT           = -3,
         PERF_EVENT_STATE_ERROR          = -2,
@@ -578,7 +578,7 @@ struct perf_event {
         struct pmu                      *pmu;
         void                            *pmu_private;
 
-        enum perf_event_active_state    state;
+        enum perf_event_state           state;
         unsigned int                    attach_state;
         local64_t                       count;
         atomic64_t                      child_count;
@@ -588,26 +588,10 @@ struct perf_event {
          * has been enabled (i.e. eligible to run, and the task has
          * been scheduled in, if this is a per-task event)
          * and running (scheduled onto the CPU), respectively.
-         *
-         * They are computed from tstamp_enabled, tstamp_running and
-         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
          */
         u64                             total_time_enabled;
         u64                             total_time_running;
-
-        /*
-         * These are timestamps used for computing total_time_enabled
-         * and total_time_running when the event is in INACTIVE or
-         * ACTIVE state, measured in nanoseconds from an arbitrary point
-         * in time.
-         * tstamp_enabled: the notional time when the event was enabled
-         * tstamp_running: the notional time when the event was scheduled on
-         * tstamp_stopped: in INACTIVE state, the notional time when the
-         *      event was scheduled off.
-         */
-        u64                             tstamp_enabled;
-        u64                             tstamp_running;
-        u64                             tstamp_stopped;
+        u64                             tstamp;
 
         /*
          * timestamp shadows the actual context timing but it can
@@ -699,7 +683,6 @@ struct perf_event {
 
 #ifdef CONFIG_CGROUP_PERF
         struct perf_cgroup              *cgrp; /* cgroup event is attach to */
-        int                             cgrp_defer_enabled;
 #endif
 
         struct list_head                sb_list;
@@ -806,6 +789,7 @@ struct perf_output_handle {
 struct bpf_perf_event_data_kern {
         struct pt_regs *regs;
         struct perf_sample_data *data;
+        struct perf_event *event;
 };
 
 #ifdef CONFIG_CGROUP_PERF
@@ -884,7 +868,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
                                 int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+                          u64 *enabled, u64 *running);
 extern u64 perf_event_read_value(struct perf_event *event,
                                  u64 *enabled, u64 *running);
 
@@ -1286,7 +1271,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 {
         return ERR_PTR(-EINVAL);
 }
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+                                        u64 *enabled, u64 *running)
 {
         return -EINVAL;
 }