author     David S. Miller <davem@davemloft.net>    2016-06-16 09:42:58 +0300
committer  David S. Miller <davem@davemloft.net>    2016-06-16 09:42:58 +0300
commit     f0362eab22f20e7060870321c837e1e5f68c45fb (patch)
tree       b4f7d122f21e841f0057c624e064f8ca30622e48 /include
parent     b478af0cd7957faca83779fe6832abae163f7159 (diff)
parent     3b1efb196eee45b2f0c4994e0c43edb5e367f620 (diff)
Merge branch 'bpf-fd-array-release'
Daniel Borkmann says:
====================
bpf: improve fd array release
This set improves the BPF perf fd array map release path with respect
to purging entries; the first two patches extend the API as needed.
Please see the individual patches for more details.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
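For readers unfamiliar with the lifecycle being fixed: a perf event array map holds references to perf event files that user space installed by fd. Before this series those references were only dropped when the map itself was finally freed; with the new ->map_release() hook they are purged as soon as the installing map file goes away. The following user-space walk-through is purely illustrative (it is not part of the patch set, and error handling is trimmed to the minimum):

/* Illustrative only: store a perf event fd in a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * map, then close the map fd. After this series, releasing the map file
 * purges the entry this file installed, instead of pinning the perf event
 * until the map is freed.
 */
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	struct perf_event_attr pattr;
	union bpf_attr attr;
	int map_fd, perf_fd, key = 0;
	__u32 value;

	/* One-slot perf event array; values are perf event fds (u32). */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_PERF_EVENT_ARRAY;
	attr.key_size = sizeof(int);
	attr.value_size = sizeof(__u32);
	attr.max_entries = 1;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return perror("BPF_MAP_CREATE"), 1;

	/* A software event of the kind accepted by perf event array maps. */
	memset(&pattr, 0, sizeof(pattr));
	pattr.size = sizeof(pattr);
	pattr.type = PERF_TYPE_SOFTWARE;
	pattr.config = PERF_COUNT_SW_BPF_OUTPUT;
	perf_fd = syscall(__NR_perf_event_open, &pattr, -1 /* pid */,
			  0 /* cpu */, -1 /* group_fd */, 0);
	if (perf_fd < 0)
		return perror("perf_event_open"), 1;

	/* Install the perf fd in slot 0; the kernel resolves the fd and
	 * keeps a struct file reference alongside the perf_event. */
	value = perf_fd;
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) < 0)
		return perror("BPF_MAP_UPDATE_ELEM"), 1;

	/* Releasing the map's file is what now triggers ->map_release()
	 * and purges the entry installed through it. */
	close(map_fd);
	close(perf_fd);
	return 0;
}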
Diffstat (limited to 'include')
-rw-r--r--   include/linux/bpf.h   24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1bcae82c6cb1..9adfef694a25 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -13,13 +13,15 @@
 #include <linux/percpu.h>
 #include <linux/err.h>
 
+struct perf_event;
 struct bpf_map;
 
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
-	void (*map_free)(struct bpf_map *);
+	void (*map_release)(struct bpf_map *map, struct file *map_file);
+	void (*map_free)(struct bpf_map *map);
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 
 	/* funcs callable from userspace and from eBPF programs */
@@ -28,8 +30,9 @@ struct bpf_map_ops {
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
 
 	/* funcs called by prog_array and perf_event_array map */
-	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
-	void (*map_fd_put_ptr) (void *ptr);
+	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+				int fd);
+	void (*map_fd_put_ptr)(void *ptr);
 };
 
 struct bpf_map {
@@ -164,11 +167,19 @@ struct bpf_array {
 		void __percpu *pptrs[0] __aligned(8);
 	};
 };
+
 #define MAX_TAIL_CALL_CNT 32
 
+struct bpf_event_entry {
+	struct perf_event *event;
+	struct file *perf_file;
+	struct file *map_file;
+	struct rcu_head rcu;
+};
+
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
@@ -206,8 +217,13 @@
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 			   u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 			    u64 flags);
+
 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  * forced to use 'long' read/writes to try to atomically copy long counters.
  * Best-effort only. No barriers here, since it _will_ race with concurrent
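The purge itself lives in kernel/bpf/arraymap.c rather than in this header. As a rough sketch of the mechanism, hedged rather than the merged code (fd_array_map_delete_elem() is the presumed internal helper and the locking is simplified), a perf event array's ->map_release() can scan its slots and drop only the entries installed through the file being released:

/* Sketch only: purge a perf event array's own entries on map file release,
 * using the struct bpf_event_entry and ->map_release() hook added above.
 */
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		/* Only purge entries this map file installed; other files
		 * referring to the same map keep their entries intact. */
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

This is why struct bpf_event_entry records map_file next to the perf_event: without it, a release through one fd could either tear down entries installed via another fd or leave entries pinned until the last map reference drops.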