author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2019-05-10 21:43:46 +0300
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2019-05-10 21:43:46 +0300
commit    2a267e7c41aa88215de2b542de797d03d16ecdfd (patch)
tree      b949270835e304c8b60a40cde1b2c2e19c13b33a /include/linux/bpf.h
parent    0981949da8f7498b96c6e0ae60680865ca283bf1 (diff)
parent    e93c9c99a629c61837d5a7fc2120cd2b6c70dbdd (diff)
Merge tag 'v5.1' into next
Sync up with mainline to bring in the latest APIs.
Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h | 146
1 file changed, 137 insertions(+), 9 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 33014ae73103..944ccc310201 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -16,6 +16,7 @@
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
+#include <linux/u64_stats_sync.h>
struct bpf_verifier_env;
struct perf_event;
@@ -23,6 +24,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
+struct btf;
struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
@@ -52,6 +54,7 @@ struct bpf_map_ops {
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
};
@@ -70,14 +73,15 @@ struct bpf_map {
u32 value_size;
u32 max_entries;
u32 map_flags;
- u32 pages;
+ int spin_lock_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
+ u32 pages;
bool unpriv_array;
- /* 55 bytes hole */
+ /* 51 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
@@ -89,6 +93,36 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+{
+ return map->spin_lock_off >= 0;
+}
+
+static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+{
+ if (likely(!map_value_has_spin_lock(map)))
+ return;
+ *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
+ (struct bpf_spin_lock){};
+}
+
+/* copy everything but bpf_spin_lock */
+static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+{
+ if (unlikely(map_value_has_spin_lock(map))) {
+ u32 off = map->spin_lock_off;
+
+ memcpy(dst, src, off);
+ memcpy(dst + off + sizeof(struct bpf_spin_lock),
+ src + off + sizeof(struct bpf_spin_lock),
+ map->value_size - off - sizeof(struct bpf_spin_lock));
+ } else {
+ memcpy(dst, src, map->value_size);
+ }
+}
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ bool lock_src);
+
struct bpf_offload_dev;
struct bpf_offloaded_map;
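A minimal sketch (not part of this diff; function names invented) of how a map implementation is expected to use the two inline helpers above: the lock word is zeroed when an element is first created and skipped when values are copied, so a plain update can never overwrite a held lock.

	static void example_init_elem(struct bpf_map *map, void *elem)
	{
		/* no-op unless the value type embeds a bpf_spin_lock */
		check_and_init_map_lock(map, elem);
	}

	static void example_update_elem(struct bpf_map *map, void *elem,
					void *uvalue)
	{
		/* copies everything except the bpf_spin_lock region */
		copy_map_value(map, elem, uvalue);
	}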
@@ -126,6 +160,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
}
int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
@@ -158,7 +193,8 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
- ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
};
/* type of values returned from helper functions */
@@ -168,6 +204,7 @@ enum bpf_return_type {
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
+ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -221,6 +258,10 @@ enum bpf_reg_type {
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
+ PTR_TO_SOCK_COMMON, /* reg points to sock_common */
+ PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
+ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
+ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
};
/* The information passed from prog-specific *_is_valid_access
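The four new register types back the v5.1 socket helpers. A hedged example of a program the verifier can now track (skb->sk and bpf_tcp_sock() are from the same release; the program itself and SEC() usage are illustrative, in libbpf selftest style):

	SEC("cgroup_skb/egress")
	int snd_cwnd_ok(struct __sk_buff *skb)
	{
		struct bpf_sock *sk = skb->sk;	/* PTR_TO_SOCK_COMMON_OR_NULL */
		struct bpf_tcp_sock *tp;

		if (!sk)			/* NULL check required by the verifier */
			return 1;
		tp = bpf_tcp_sock(sk);		/* RET_PTR_TO_TCP_SOCK_OR_NULL */
		if (!tp)			/* promoted to PTR_TO_TCP_SOCK */
			return 1;
		return tp->snd_cwnd != 0;
	}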
@@ -265,18 +306,28 @@ struct bpf_verifier_ops {
};
struct bpf_prog_offload_ops {
+ /* verifier basic callbacks */
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
+ /* verifier optimization callbacks (called after .finalize) */
+ int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
+ /* program management callbacks */
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
};
struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- const struct bpf_prog_offload_ops *dev_ops;
+ bool opt_failed;
void *jited_image;
u32 jited_len;
};
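With dev_ops gone from struct bpf_prog_offload, a driver now supplies a single ops table covering verification, optimization, and program lifetime. A hypothetical driver-side table for the expanded interface (all callback names invented for illustration):

	static const struct bpf_prog_offload_ops mydrv_bpf_dev_ops = {
		/* verifier basic callbacks */
		.insn_hook	= mydrv_verify_insn,
		.finalize	= mydrv_verifier_finalize,
		/* post-finalize optimization callbacks */
		.replace_insn	= mydrv_replace_insn,
		.remove_insns	= mydrv_remove_insns,
		/* program management callbacks */
		.prepare	= mydrv_prepare,
		.translate	= mydrv_translate,
		.destroy	= mydrv_destroy,
	};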
@@ -289,13 +340,21 @@ enum bpf_cgroup_storage_type {
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+struct bpf_prog_stats {
+ u64 cnt;
+ u64 nsecs;
+ struct u64_stats_sync syncp;
+};
+
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
u32 stack_depth;
u32 id;
- u32 func_cnt;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
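struct bpf_prog_stats pairs with the new <linux/u64_stats_sync.h> include above; the per-CPU instance hangs off bpf_prog_aux (added below) and is gated by the new sysctl_bpf_stats_enabled further down. A sketch of the writer side, roughly what the run-time dispatch does when stats are on, not the exact BPF_PROG_RUN expansion:

	static u32 run_and_account(const struct bpf_prog *prog, const void *ctx)
	{
		struct bpf_prog_stats *stats;
		u64 start = sched_clock();
		u32 ret;

		ret = prog->bpf_func(ctx, prog->insnsi);
		stats = this_cpu_ptr(prog->aux->stats);	/* preemption is off here */
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;				/* one more run */
		stats->nsecs += sched_clock() - start;	/* time spent, in ns */
		u64_stats_update_end(&stats->syncp);
		return ret;
	}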
@@ -312,6 +371,31 @@ struct bpf_prog_aux {
void *security;
#endif
struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
+ struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
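The linfo_idx convention described in the comments above can be summed up in one illustrative helper (not part of the patch):

	/* first line-info entry belonging to this (sub)prog; the main
	 * prog has linfo_idx == 0, subprogs index into the shared array
	 */
	static const struct bpf_line_info *
	prog_first_linfo(const struct bpf_prog *prog)
	{
		const struct bpf_prog_aux *aux = prog->aux;

		if (!aux->linfo || aux->linfo_idx >= aux->nr_linfo)
			return NULL;
		return &aux->linfo[aux->linfo_idx];
	}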
@@ -365,6 +449,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
/* an array of programs to be executed under rcu_lock.
*
@@ -423,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
} \
_out: \
rcu_read_unlock(); \
- preempt_enable_no_resched(); \
+ preempt_enable(); \
_ret; \
})
@@ -479,6 +566,7 @@ void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
+extern int sysctl_bpf_stats_enabled;
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
@@ -523,7 +611,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
@@ -691,8 +780,10 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
-struct bpf_offload_dev *bpf_offload_dev_create(void);
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
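Taken together with the ops table sketched earlier, a hypothetical driver init path under the reworked constructor would look like this (struct mydrv and its fields are invented; the priv cookie passed at create time comes back via bpf_offload_dev_priv() in callbacks):

	static int mydrv_bpf_init(struct mydrv *drv)
	{
		/* one ops table, registered once per offload device */
		drv->bpf_dev = bpf_offload_dev_create(&mydrv_bpf_dev_ops, drv);
		if (IS_ERR(drv->bpf_dev))
			return PTR_ERR(drv->bpf_dev);
		return bpf_offload_dev_netdev_register(drv->bpf_dev,
						       drv->netdev);
	}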
@@ -835,7 +926,8 @@ extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
-
+extern const struct bpf_func_proto bpf_spin_lock_proto;
+extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
/* Shared helpers among cBPF and eBPF. */
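For context, a sketch of how a BPF program consumes the two new lock helpers, mirroring the v5.1 selftests (the "counters" array map and its BTF are elided; names are illustrative). The verifier only accepts a lock that is a struct bpf_spin_lock member of the map value and enforces a matching unlock:

	struct counter {
		struct bpf_spin_lock lock;
		u64 packets;
	};

	SEC("cgroup_skb/ingress")
	int bump(struct __sk_buff *skb)
	{
		u32 key = 0;
		struct counter *c;

		c = bpf_map_lookup_elem(&counters, &key);
		if (!c)
			return 1;
		bpf_spin_lock(&c->lock);	/* serializes against other CPUs */
		c->packets++;
		bpf_spin_unlock(&c->lock);
		return 1;
	}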
@@ -843,6 +935,9 @@ void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#if defined(CONFIG_NET)
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
@@ -851,6 +946,12 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_prog *prog,
u32 *target_size);
#else
+static inline bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
static inline bool bpf_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
@@ -867,4 +968,31 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif
+#ifdef CONFIG_INET
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif /* CONFIG_INET */
+
#endif /* _LINUX_BPF_H */