| author | David S. Miller <davem@davemloft.net> | 2016-07-16 00:23:56 +0300 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2016-07-16 00:23:56 +0300 |
| commit | e980a0764a16897294207eaf4556c3038768adb9 (patch) | |
| tree | 51e4fcdea68602c29e21fdd23519a214b2208ed6 /include | |
| parent | 7acef60455c4814a52afb8629d166a3b4dfa0ebb (diff) | |
| parent | 555c8a8623a3a87b3c990ba30b7fd2e5914e41d2 (diff) | |
| download | linux-e980a0764a16897294207eaf4556c3038768adb9.tar.xz | |
Merge branch 'bpf-event-output-helper-improvements'
Daniel Borkmann says:
====================
BPF event output helper improvements
This set adds improvements to the BPF event output helper to
support non-linear data sampling, here specifically, for skb
context. For details please see individual patches. The set
is based against net-next tree.
v1 -> v2:
- Integrated and adapted Peter's diff into patch 1, updated
the remaining ones accordingly. Thanks Peter!
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
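As a usage illustration (not part of this merge), here is a minimal sketch of what the new skb-context sampling path looks like from a BPF program once this set is applied: the number of packet bytes to append is packed into the upper 32 bits of the helper's flags argument (masked by BPF_F_CTXLEN_MASK), so the kernel can pull the payload straight from the skb instead of forcing the program to copy it onto its stack first. The map definition, section name, and metadata struct below are illustrative and use current libbpf conventions, not anything defined by this series:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative metadata prepended to each sample. */
struct sample_meta {
	__u32 ifindex;
	__u32 pkt_len;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("tc")
int sample_skb(struct __sk_buff *skb)
{
	struct sample_meta meta = {
		.ifindex = skb->ifindex,
		.pkt_len = skb->len,
	};
	/* Ask the helper to append up to 64 packet bytes after the meta
	 * data; the length travels in the upper 32 bits of flags and the
	 * kernel fetches the bytes from the skb via the ctx copy callback.
	 */
	__u64 sample_len = skb->len < 64 ? skb->len : 64;
	__u64 flags = BPF_F_CURRENT_CPU |
		      ((sample_len << 32) & BPF_F_CTXLEN_MASK);

	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
	return 0; /* TC_ACT_OK */
}

char _license[] SEC("license") = "GPL";
```

Userspace then reads each sample from the perf ring buffer as the metadata struct immediately followed by the sampled packet bytes.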
Diffstat (limited to 'include')

| -rw-r--r-- | include/linux/bpf.h        |  7 |
| -rw-r--r-- | include/linux/perf_event.h | 20 |
| -rw-r--r-- | include/uapi/linux/bpf.h   |  2 |

3 files changed, 27 insertions(+), 2 deletions(-)
```diff
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b3336b4f5d04..c13e92b00bf5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -209,7 +209,12 @@ u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
-const struct bpf_func_proto *bpf_get_event_output_proto(void);
+
+typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
+					unsigned long off, unsigned long len);
+
+u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a827cecd62f..e79e6c6fed89 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -69,9 +69,22 @@ struct perf_callchain_entry_ctx {
 	bool			    contexts_maxed;
 };
 
+typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
+				     unsigned long off, unsigned long len);
+
+struct perf_raw_frag {
+	union {
+		struct perf_raw_frag	*next;
+		unsigned long		pad;
+	};
+	perf_copy_f			copy;
+	void				*data;
+	u32				size;
+} __packed;
+
 struct perf_raw_record {
+	struct perf_raw_frag		frag;
 	u32				size;
-	void				*data;
 };
 
 /*
@@ -1283,6 +1296,11 @@ extern void perf_restore_debug_store(void);
 static inline void perf_restore_debug_store(void)			{ }
 #endif
 
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+	return frag->pad < sizeof(u64);
+}
+
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 262a7e883b19..c4d922439d20 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -401,6 +401,8 @@ enum bpf_func_id {
 /* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
 #define BPF_F_INDEX_MASK		0xffffffffULL
 #define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+/* BPF_FUNC_perf_event_output for sk_buff input context. */
+#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
 
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
```
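A note on the new API, with a hedged sketch. perf_raw_frag_last() exploits the union at the top of perf_raw_frag: a chained fragment stores a valid kernel pointer in next, and no valid kernel pointer is ever smaller than sizeof(u64), so leaving pad at zero (a NULL next) marks the final fragment. The sketch below is modeled on what bpf_event_output() in this series does; emit_sample() and its parameters are hypothetical stand-ins for the caller's data:

```c
#include <linux/perf_event.h>

/* Hypothetical producer: chain one linear meta fragment to one
 * non-linear ctx fragment, then walk the chain as a consumer would.
 */
static u64 emit_sample(void *meta, u64 meta_size,
		       void *ctx, u64 ctx_size, perf_copy_f ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy = ctx_copy,	/* e.g. an skb header-pointer copy */
		.data = ctx,
		.size = ctx_size,
		/* .next/.pad left zeroed: this frag is the last one */
	};
	struct perf_raw_record raw = {
		.frag = {
			.next = ctx_size ? &frag : NULL, /* NULL pad == last */
			.size = meta_size,
			.data = meta,
		},
	};
	const struct perf_raw_frag *f = &raw.frag;
	u32 total = 0;

	/* Total payload length, as a sample consumer would compute it. */
	do {
		total += f->size;
		if (perf_raw_frag_last(f))
			break;
		f = f->next;
	} while (1);

	return total;
}
```

The copy callback is only invoked when the sample is actually written to the ring buffer, which is what lets the skb path hand over a reference to the packet instead of copying its bytes onto the BPF stack up front.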
