| author | Yonghong Song <yhs@fb.com> | 2017-06-14 01:52:13 +0300 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-06-14 21:56:25 +0300 | 
| commit | 31fd85816dbe3a714bcc3f67c17c3dd87011f79e (patch) | |
| tree | d8c694e4997605254ea96a76c5d633f60ee091cf /kernel/trace/bpf_trace.c | |
| parent | a88e2676a6cd3352c2f590f872233d83d8db289c (diff) | |
| download | linux-31fd85816dbe3a714bcc3f67c17c3dd87011f79e.tar.xz | |
bpf: permit narrower loads from bpf program context fields
Currently, the verifier will reject a program if it contains a
narrower load from the bpf context structure. For example,
        __u8 h = __sk_buff->hash;
        __u16 p = __sk_buff->protocol;
        __u32 sample_period = bpf_perf_event_data->sample_period;
are narrower loads from 4-byte or 8-byte fields.
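As an illustration, a hypothetical minimal socket-filter program (the
function name and section are illustrative, not part of this commit)
that triggers such a narrow context load:

```c
#include <linux/bpf.h>

/* Hypothetical example, not from the commit: a 1-byte context
 * load from the 4-byte __sk_buff->hash field. Before this patch
 * the verifier rejected the narrow load; with it, the load is
 * accepted and rewritten by the verifier.
 */
__attribute__((section("socket"), used))
int narrow_load_example(struct __sk_buff *skb)
{
	/* volatile forces the compiler to emit a real 1-byte ctx load */
	__u8 h = *(volatile __u8 *)&skb->hash;

	return h;
}
```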
This patch solves the issue by:
  . Introducing a new parameter ctx_field_size to carry the
    field size of a narrower load from the prog type specific
    *__is_valid_access validator back to the verifier.
  . A non-zero ctx_field_size for a memory access indicates that
    (1) the underlying prog type specific convert_ctx_accesses
        supports non-whole-field access, and
    (2) the current insn is a narrower or whole field access.
  . In the verifier, for loads whose memory size is less than
    ctx_field_size, the load is transformed into a full field
    load followed by proper masking (see the sketch after this
    list).
  . Currently, __sk_buff and bpf_perf_event_data->sample_period
    support narrower loads.
  . Narrower stores are still not allowed, as typical ctx stores
    are just normal stores.
Because of this change, some verifier tests that expected such
narrower loads to be rejected now fail; these tests are removed.
As a bonus, some out-of-bound __sk_buff->cb accesses are renamed
to proper field names, and two redundant "skb cb oob" tests are
removed.
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
| -rw-r--r-- | kernel/trace/bpf_trace.c | 21 | 
1 file changed, 15 insertions, 6 deletions
```diff
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 051d7fca0c09..9d3ec8253131 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-					enum bpf_reg_type *reg_type)
+					enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < 0 || off >= sizeof(struct pt_regs))
 		return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 }
 
 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 		return false;
@@ -581,17 +581,26 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
 };
 
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
+	int sample_period_off;
+
 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
 		return false;
 	if (type != BPF_READ)
 		return false;
 	if (off % size != 0)
 		return false;
-	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
-		if (size != sizeof(u64))
-			return false;
+
+	/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
+	sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
+	if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
+		*ctx_field_size = 8;
+#ifdef __LITTLE_ENDIAN
+		return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+#else
+		return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+#endif
 	} else {
 		if (size != sizeof(long))
 			return false;
```
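The per-endianness check in pe_prog_is_valid_access is the subtle
part: a narrow read is only meaningful if it covers the bytes holding
the low-order part of the 8-byte sample_period field, which sit at
offset 0 on little endian but at offset 8 - size on big endian. A
small userspace C illustration of the byte layout (this is not kernel
code, just a demonstration of why the admissible offsets differ):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration: the low 16 bits of an 8-byte value live at byte
 * offset 0 on little endian but at offset 6 on big endian, which
 * is exactly what the (off & 0x7) conditions in the patch encode.
 */
int main(void)
{
	uint64_t sample_period = 0x1122334455667788ULL;
	unsigned char raw[8];
	uint16_t low16;

	memcpy(raw, &sample_period, sizeof(raw));

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	memcpy(&low16, raw + 0, sizeof(low16)); /* (off & 0x7) == 0 */
#else
	memcpy(&low16, raw + 6, sizeof(low16)); /* (off & 0x7) + size == 8 */
#endif
	printf("low 16 bits: 0x%04x\n", low16); /* prints 0x7788 either way */
	return 0;
}
```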
