author		David S. Miller <davem@davemloft.net>	2016-07-16 00:23:56 +0300
committer	David S. Miller <davem@davemloft.net>	2016-07-16 00:23:56 +0300
commit		e980a0764a16897294207eaf4556c3038768adb9 (patch)
tree		51e4fcdea68602c29e21fdd23519a214b2208ed6 /include/linux
parent		7acef60455c4814a52afb8629d166a3b4dfa0ebb (diff)
parent		555c8a8623a3a87b3c990ba30b7fd2e5914e41d2 (diff)
download	linux-e980a0764a16897294207eaf4556c3038768adb9.tar.xz
Merge branch 'bpf-event-output-helper-improvements'
Daniel Borkmann says:

====================
BPF event output helper improvements

This set adds improvements to the BPF event output helper to support
non-linear data sampling, here specifically, for skb context. For details
please see individual patches. The set is based against net-next tree.

v1 -> v2:
  - Integrated and adapted Peter's diff into patch 1, updated the
    remaining ones accordingly. Thanks Peter!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/bpf.h          7
-rw-r--r--   include/linux/perf_event.h   20
2 files changed, 25 insertions, 2 deletions
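
The declaration added to include/linux/bpf.h below gives the shape of the new
interface: a linear meta buffer from the program stack plus an opaque ctx
pointer with a copy callback for data that may not be linear (e.g. paged skb
data). As a rough, illustrative sketch only (the real implementation lives in
kernel/trace/bpf_trace.c and is not part of this include/linux diff), the
helper can chain the two pieces as perf_raw_frag fragments:

/* Sketch only, not the code from kernel/trace/bpf_trace.c: chain the
 * linear meta buffer and the (possibly non-linear) ctx data into a
 * single perf_raw_record using the perf_raw_frag layout added below.
 */
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy	= ctx_copy,	/* knows how to copy the ctx data */
		.data	= ctx,
		.size	= ctx_size,
	};
	struct perf_raw_record raw = {
		.frag = {
			.next	= ctx_size ? &frag : NULL, /* no ctx: single fragment */
			.data	= meta,
			.size	= meta_size,
		},
	};

	/* hand 'raw' to the perf event selected via 'map' and 'flags';
	 * the actual output plumbing is elided in this sketch.
	 */
	return 0;
}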
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b3336b4f5d04..c13e92b00bf5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -209,7 +209,12 @@ u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
-const struct bpf_func_proto *bpf_get_event_output_proto(void);
+
+typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
+ unsigned long len);
+
+u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
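
The bpf_ctx_copy_t callback defers copying of the context data until perf
actually writes the sample, so non-linear skb data never has to be flattened
onto the program stack first. A hypothetical callback for skb context could
look like the following; the name example_skb_copy and the return convention
(bytes left uncopied, 0 on success) are assumptions for illustration, not code
from this diff (the skb copy helper itself is added elsewhere in the series,
in net/core/filter.c):

#include <linux/skbuff.h>

/* Hypothetical bpf_ctx_copy_t implementation for skb context. It uses
 * skb_copy_bits(), which can read across the linear area, paged frags
 * and the frag list -- the reason a copy callback is needed at all.
 */
static unsigned long example_skb_copy(void *dst, const void *ctx,
				      unsigned long len)
{
	const struct sk_buff *skb = ctx;

	if (skb_copy_bits(skb, 0, dst, len) < 0)
		return len;	/* assumed convention: bytes not copied */

	return 0;		/* everything copied */
}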
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a827cecd62f..e79e6c6fed89 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -69,9 +69,22 @@ struct perf_callchain_entry_ctx {
bool contexts_maxed;
};
+typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
+ unsigned long len);
+
+struct perf_raw_frag {
+ union {
+ struct perf_raw_frag *next;
+ unsigned long pad;
+ };
+ perf_copy_f copy;
+ void *data;
+ u32 size;
+} __packed;
+
struct perf_raw_record {
+ struct perf_raw_frag frag;
u32 size;
- void *data;
};
/*
@@ -1283,6 +1296,11 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
+
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
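
perf_raw_frag_last() leans on the anonymous union in struct perf_raw_frag: the
tail fragment leaves next at NULL, so reading the same storage through pad
yields 0, which is below sizeof(u64), whereas any real fragment pointer is far
larger. A minimal consumer-side sketch of walking a chain with it (the emit
step is only described in a comment; nothing here is taken from the perf
output code itself):

/* Sketch: walk a fragment chain the way the perf output path is
 * expected to, stopping at the tail detected by perf_raw_frag_last().
 */
static void walk_raw_frags(const struct perf_raw_frag *head)
{
	const struct perf_raw_frag *frag = head;

	do {
		/* copy frag->size bytes from frag->data into the ring
		 * buffer, via frag->copy when set (non-linear data),
		 * otherwise with a plain copy.
		 */
		if (perf_raw_frag_last(frag))
			break;
		frag = frag->next;
	} while (1);
}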