author    Alexei Starovoitov <ast@fb.com>    2017-03-16 04:26:42 +0300
committer David S. Miller <davem@davemloft.net>    2017-03-17 06:44:11 +0300
commit    81ed18ab3098b6519274545e80a29caacb77d160 (patch)
tree      62cf05acb5648f25efd010110c6f40b70d1371c6 /include/linux/bpf.h
parent    8041902dae5299c1f194ba42d14383f734631009 (diff)
download  linux-81ed18ab3098b6519274545e80a29caacb77d160.tar.xz
bpf: add helper inlining infra and optimize map_array lookup
Optimize the bpf_call -> bpf_map_lookup_elem() -> array_map_lookup_elem()
call chain into a short sequence of bpf instructions. When the JIT is on,
that sequence of bpf instructions becomes a sequence of native cpu
instructions, which is significantly faster than an indirect call plus the
prologue/epilogue of two functions.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
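The hunk below only adds the callback hook to bpf_map_ops; the array-map
side of the commit lives in kernel/bpf/arraymap.c. As a rough sketch of what
such a callback emits (an illustrative reconstruction, not the verbatim
kernel code; it assumes the bpf_map_lookup_elem calling convention, where
R1 holds the map pointer and R2 a pointer to the u32 index):

    /* Sketch modeled on kernel/bpf/arraymap.c; in-kernel it relies on
     * <linux/bpf.h>, <linux/filter.h> (BPF_* insn macros) and <linux/log2.h>.
     */
    static u32 array_map_gen_lookup(struct bpf_map *map,
                                    struct bpf_insn *insn_buf)
    {
    	struct bpf_insn *insn = insn_buf;
    	u32 elem_size = round_up(map->value_size, 8);
    	const int ret = BPF_REG_0;
    	const int map_ptr = BPF_REG_1;
    	const int index = BPF_REG_2;

    	/* R1 = &array->value[0] */
    	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr,
    				offsetof(struct bpf_array, value));
    	/* R0 = *(u32 *)(R2 + 0), i.e. load the index */
    	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
    	/* if (index >= max_entries) jump to the NULL return below */
    	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
    	/* R0 = index * elem_size (shift when elem_size is a power of two) */
    	if (is_power_of_2(elem_size))
    		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
    	else
    		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
    	/* R0 = &array->value[0] + index * elem_size */
    	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
    	/* skip over the out-of-bounds NULL return */
    	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
    	/* out of bounds: R0 = NULL */
    	*insn++ = BPF_MOV64_IMM(ret, 0);

    	return insn - insn_buf;
    }

The callback returns the number of instructions it wrote, so the verifier
knows how many instructions to splice in over the original BPF_CALL.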
Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h  1
1 file changed, 1 insertion, 0 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 909fc033173a..da8c64ca8dc9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -35,6 +35,7 @@ struct bpf_map_ops {
 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
 				int fd);
 	void (*map_fd_put_ptr)(void *ptr);
+	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
 };
 
 struct bpf_map {
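On the consumer side, the verifier's fixup pass can consult map_gen_lookup()
to replace the helper call with the generated sequence. A hedged sketch of
that patching step (condensed; bpf_patch_insn_data() comes from the parent
commit's instruction-patching infrastructure, and env, insn, map_ptr, i and
delta are the fixup loop's own bookkeeping, shown here only for context):

    /* Inside the verifier's instruction fixup pass: when the program calls
     * bpf_map_lookup_elem() on a map whose ops provide map_gen_lookup(),
     * splice the map-generated inline sequence over the BPF_CALL insn.
     */
    struct bpf_insn insn_buf[16];
    struct bpf_prog *new_prog;
    u32 cnt;

    if (insn->imm == BPF_FUNC_map_lookup_elem &&
        map_ptr->ops->map_gen_lookup) {
    	cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
    	if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf))
    		return -EINVAL;

    	/* replace the single call insn with the cnt generated insns */
    	new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
    	if (!new_prog)
    		return -ENOMEM;

    	/* account for the net growth of the program */
    	delta += cnt - 1;
    	env->prog = new_prog;
    }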