author     David S. Miller <davem@davemloft.net>  2019-06-20 07:06:27 +0300
committer  David S. Miller <davem@davemloft.net>  2019-06-20 07:06:27 +0300
commit     dca73a65a68329ee386d3ff473152bac66eaab39 (patch)
tree       97c41afb932bdd6cbe67e7ffc38bfe5952c97798 /include/linux
parent     497ad9f5b2dc86b733761b9afa44ecfa2f17be65 (diff)
parent     94079b64255fe40b9b53fd2e4081f68b9b14f54a (diff)
download   linux-dca73a65a68329ee386d3ff473152bac66eaab39.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-06-19

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) new SO_REUSEPORT_DETACH_BPF setsockopt, from Martin.

2) BTF based map definition, from Andrii (see the sketch below).

3) support bpf_map_lookup_elem for xskmap, from Jonathan.

4) bounded loops and scalar precision logic in the verifier, from Alexei.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
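For reference on item 2: "BTF based map definition" means declaring a map as a typed struct placed in the ".maps" ELF section, so the loader derives key/value sizes and BTF from the type information instead of a hand-filled struct bpf_map_def. A minimal sketch, assuming the __uint()/__type() and SEC() helper macros from libbpf's bpf_helpers.h (include path and map name/sizes are illustrative, not from this diff):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>   /* assumed include path for __uint/__type/SEC */

	/* BTF-defined map: key/value types live in BTF, so no explicit
	 * key_size/value_size fields are needed in the definition.
	 */
	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1024);
		__type(key, __u32);
		__type(value, __u64);
	} counters SEC(".maps");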
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bpf.h25
-rw-r--r--include/linux/bpf_verifier.h69
2 files changed, 93 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9f7c453db70c..a62e7889b0b6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -277,6 +277,7 @@ enum bpf_reg_type {
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
+ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
};
/* The information passed from prog-specific *_is_valid_access
@@ -1098,6 +1099,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
+
+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -1114,6 +1124,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
+static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
#endif /* CONFIG_INET */
#endif /* _LINUX_BPF_H */
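The new PTR_TO_XDP_SOCK register type and the bpf_xdp_sock_is_valid_access()/bpf_xdp_sock_convert_ctx_access() hooks above are what let the verifier accept field reads on the pointer that bpf_map_lookup_elem() returns for an xskmap (item 3 in the log). A minimal sketch of a program that relies on this, under the same bpf_helpers.h assumptions as the earlier sketch; map name, sizes and the redirect policy are illustrative only:

	struct {
		__uint(type, BPF_MAP_TYPE_XSKMAP);
		__uint(max_entries, 64);
		__type(key, __u32);
		__type(value, __u32);
	} xsks_map SEC(".maps");

	SEC("xdp")
	int xdp_redirect_xsk(struct xdp_md *ctx)
	{
		__u32 idx = ctx->rx_queue_index;
		struct bpf_xdp_sock *xsk;

		/* The lookup yields PTR_TO_XDP_SOCK_OR_NULL; after the NULL check,
		 * the queue_id read is validated by bpf_xdp_sock_is_valid_access().
		 */
		xsk = bpf_map_lookup_elem(&xsks_map, &idx);
		if (xsk && xsk->queue_id == idx)
			return bpf_redirect_map(&xsks_map, idx, 0);

		return XDP_PASS;
	}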
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 026ba8b81e88..5fe99f322b1c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -136,6 +136,8 @@ struct bpf_reg_state {
*/
s32 subreg_def;
enum bpf_reg_liveness live;
+ /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+ bool precise;
};
enum bpf_stack_slot_type {
@@ -187,14 +189,77 @@ struct bpf_func_state {
struct bpf_stack_state *stack;
};
+struct bpf_idx_pair {
+ u32 prev_idx;
+ u32 idx;
+};
+
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ /*
+ * 'branches' field is the number of branches left to explore:
+ * 0 - all possible paths from this state reached bpf_exit or
+ * were safely pruned
+ * 1 - at least one path is being explored.
+ * This state hasn't reached bpf_exit
+ * 2 - at least two paths are being explored.
+ * This state is an immediate parent of two children.
+ * One is fallthrough branch with branches==1 and another
+ * state is pushed into stack (to be explored later) also with
+ * branches==1. The parent of this state has branches==1.
+ * The verifier state tree connected via 'parent' pointer looks like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 2 -> 1 (second 'if' pushed into stack)
+ * 1
+ * 1
+ * 1 bpf_exit.
+ *
+ * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+ * and the verifier state tree will look like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 1 -> 1 (second 'if' pushed into stack)
+ * 0
+ * 0
+ * 0 bpf_exit.
+ * After pop_stack() the do_check() will resume at second 'if'.
+ *
+ * If is_state_visited() sees a state with branches > 0 it means
+ * there is a loop. If such state is exactly equal to the current state
+ * it's an infinite loop. Note states_equal() checks for states
+ * equivalency, so two states being 'states_equal' does not mean
+ * infinite loop. The exact comparison is provided by
+ * states_maybe_looping() function. It's a stronger pre-check and
+ * much faster than states_equal().
+ *
+ * This algorithm may not find all possible infinite loops or
+ * loop iteration count may be too high.
+ * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+ */
+ u32 branches;
u32 insn_idx;
u32 curframe;
u32 active_spin_lock;
bool speculative;
+
+ /* first and last insn idx of this verifier state */
+ u32 first_insn_idx;
+ u32 last_insn_idx;
+ /* jmp history recorded from first to last.
+ * backtracking is using it to go from last to first.
+ * For most states jmp_history_cnt is [0-3].
+ * For loops it can go up to ~40.
+ */
+ struct bpf_idx_pair *jmp_history;
+ u32 jmp_history_cnt;
};
#define bpf_get_spilled_reg(slot, frame) \
@@ -309,7 +374,9 @@ struct bpf_verifier_env {
} cfg;
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
- u32 insn_processed;
+ u32 prev_insn_processed, insn_processed;
+ /* number of jmps, calls, exits analyzed so far */
+ u32 prev_jmps_processed, jmps_processed;
/* total verification time */
u64 verification_time;
/* maximum number of verifier states kept in 'branching' instructions */
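The bpf_verifier_state additions above (the 'branches' counter, the jmp_history backtracking log, and the 'precise' scalar flag), together with the prev_*_processed counters, implement item 4 of the log: bounded loops with scalar precision tracking. A hedged sketch of the kind of loop this allows the verifier to accept without #pragma unroll; the bound, section name and drop policy are arbitrary choices for illustration:

	SEC("xdp")
	int sum_first_bytes(struct xdp_md *ctx)
	{
		void *data = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;
		__u8 *p = data;
		int i, sum = 0;

		/* A real runtime loop: the verifier walks its iterations and uses
		 * jmp_history plus precision backtracking to prune equivalent
		 * states instead of requiring the loop to be unrolled.
		 */
		for (i = 0; i < 64; i++) {
			if ((void *)(p + i + 1) > data_end)
				break;
			sum += p[i];
		}

		return (sum & 1) ? XDP_DROP : XDP_PASS;
	}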