author     Daniel Borkmann <daniel@iogearbox.net>  2019-10-11 02:49:16 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>  2019-10-11 02:49:21 +0300
commit     63098555cfe09a633b74a3c9963769c6d4df8432
tree       59d859c0324f8da7eea026033a3ccd8ffec63b05
parent     e0b68fb186b251374adbd870f99b1ecea236e770
parent     666b2c10ee9d51f14d04c416a14b1cb6fd0846e4
download   linux-63098555cfe09a633b74a3c9963769c6d4df8432.tar.xz
Merge branch 'bpf-romap-known-scalars'
Andrii Nakryiko says:
====================
With BPF maps that support direct value access (currently, an array_map with a
single element, as used for global data) and that are read-only from both the
system call and the BPF program side, the BPF verifier can track their contents
as known constants.
This makes it possible for a user-space control application to pre-initialize a
read-only map (e.g., the .rodata section) with user-provided flags and
parameters and rely on the BPF verifier to detect and eliminate dead code
resulting from a specific combination of input parameters.
v1->v2:
- BPF_F_RDONLY means nothing, stick to just map->frozen (Daniel);
- stick to passing just offset into map_direct_value_addr (Martin).
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
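
For illustration only (not part of this series): a minimal BPF-side sketch of the use case described in the cover letter above. The names here (enable_debug, demo, debug_hits) are hypothetical. The point is that a const volatile global lands in the .rodata map; once the loader fills and freezes that map, the verifier can see the flag's value and treat the guarded branch as dead code when the flag is left at 0.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch; identifiers are made up for illustration. */
#include <linux/bpf.h>
#include "bpf_helpers.h"

/* placed in .rodata; read-only for the program, settable by the loader
 * before the object is loaded and the map is frozen */
static volatile const unsigned enable_debug = 0;

static volatile unsigned debug_hits;

SEC("raw_tracepoint/sys_enter")
int demo(void *ctx)
{
	/* with the .rodata map frozen and enable_debug known to be 0, the
	 * verifier can eliminate this branch instead of verifying it */
	if (enable_debug)
		debug_hits++;
	return 0;
}

char _license[] SEC("license") = "GPL";

The selftests added below exercise the same mechanism with loops whose behavior depends on values read from a read-only map.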
 kernel/bpf/verifier.c                                 | 57
 tools/testing/selftests/bpf/prog_tests/rdonly_maps.c  | 99
 tools/testing/selftests/bpf/progs/test_rdonly_maps.c  | 83
 3 files changed, 237 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ffc3e53f5300..b818fed3208d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2739,6 +2739,41 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
 	reg->smax_value = reg->umax_value;
 }
 
+static bool bpf_map_is_rdonly(const struct bpf_map *map)
+{
+	return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+}
+
+static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
+{
+	void *ptr;
+	u64 addr;
+	int err;
+
+	err = map->ops->map_direct_value_addr(map, &addr, off);
+	if (err)
+		return err;
+	ptr = (void *)addr + off;
+
+	switch (size) {
+	case sizeof(u8):
+		*val = (u64)*(u8 *)ptr;
+		break;
+	case sizeof(u16):
+		*val = (u64)*(u16 *)ptr;
+		break;
+	case sizeof(u32):
+		*val = (u64)*(u32 *)ptr;
+		break;
+	case sizeof(u64):
+		*val = *(u64 *)ptr;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -2776,9 +2811,27 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		if (err)
 			return err;
 		err = check_map_access(env, regno, off, size, false);
-		if (!err && t == BPF_READ && value_regno >= 0)
-			mark_reg_unknown(env, regs, value_regno);
+		if (!err && t == BPF_READ && value_regno >= 0) {
+			struct bpf_map *map = reg->map_ptr;
+
+			/* if map is read-only, track its contents as scalars */
+			if (tnum_is_const(reg->var_off) &&
+			    bpf_map_is_rdonly(map) &&
+			    map->ops->map_direct_value_addr) {
+				int map_off = off + reg->var_off.value;
+				u64 val = 0;
+				err = bpf_map_direct_read(map, map_off, size,
+							  &val);
+				if (err)
+					return err;
+
+				regs[value_regno].type = SCALAR_VALUE;
+				__mark_reg_known(&regs[value_regno], val);
+			} else {
+				mark_reg_unknown(env, regs, value_regno);
+			}
+		}
 	} else if (reg->type == PTR_TO_CTX) {
 		enum bpf_reg_type reg_type = SCALAR_VALUE;
diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
new file mode 100644
index 000000000000..9bf9de0aaeea
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+struct bss {
+	unsigned did_run;
+	unsigned iters;
+	unsigned sum;
+};
+
+struct rdonly_map_subtest {
+	const char *subtest_name;
+	const char *prog_name;
+	unsigned exp_iters;
+	unsigned exp_sum;
+};
+
+void test_rdonly_maps(void)
+{
+	const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
+	const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
+	const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
+	const char *file = "test_rdonly_maps.o";
+	struct rdonly_map_subtest subtests[] = {
+		{ "skip loop", prog_name_skip_loop, 0, 0 },
+		{ "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
+		{ "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
+	};
+	int i, err, zero = 0, duration = 0;
+	struct bpf_link *link = NULL;
+	struct bpf_program *prog;
+	struct bpf_map *bss_map;
+	struct bpf_object *obj;
+	struct bss bss;
+
+	obj = bpf_object__open_file(file, NULL);
+	if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+		return;
+
+	bpf_object__for_each_program(prog, obj) {
+		bpf_program__set_raw_tracepoint(prog);
+	}
+
+	err = bpf_object__load(obj);
+	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+		goto cleanup;
+
+	bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
+	if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(subtests); i++) {
+		const struct rdonly_map_subtest *t = &subtests[i];
+
+		if (!test__start_subtest(t->subtest_name))
+			continue;
+
+		prog = bpf_object__find_program_by_title(obj, t->prog_name);
+		if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
+			  t->prog_name))
+			goto cleanup;
+
+		memset(&bss, 0, sizeof(bss));
+		err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
+		if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
+			goto cleanup;
+
+		link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+		if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
+			  t->prog_name, PTR_ERR(link))) {
+			link = NULL;
+			goto cleanup;
+		}
+
+		/* trigger probe */
+		usleep(1);
+
+		bpf_link__destroy(link);
+		link = NULL;
+
+		err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
+		if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
+			goto cleanup;
+		if (CHECK(bss.did_run == 0, "check_run",
+			  "prog '%s' didn't run?\n", t->prog_name))
+			goto cleanup;
+		if (CHECK(bss.iters != t->exp_iters, "check_iters",
+			  "prog '%s' iters: %d, expected: %d\n",
+			  t->prog_name, bss.iters, t->exp_iters))
+			goto cleanup;
+		if (CHECK(bss.sum != t->exp_sum, "check_sum",
+			  "prog '%s' sum: %d, expected: %d\n",
+			  t->prog_name, bss.sum, t->exp_sum))
+			goto cleanup;
+	}
+
+cleanup:
+	bpf_link__destroy(link);
+	bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
new file mode 100644
index 000000000000..52d94e8b214d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+static volatile const struct {
+	unsigned a[4];
+	/*
+	 * if the struct's size is multiple of 16, compiler will put it into
+	 * .rodata.cst16 section, which is not recognized by libbpf; work
+	 * around this by ensuring we don't have 16-aligned struct
+	 */
+	char _y;
+} rdonly_values = { .a = {2, 3, 4, 5} };
+
+static volatile struct {
+	unsigned did_run;
+	unsigned iters;
+	unsigned sum;
+} res;
+
+SEC("raw_tracepoint/sys_enter:skip_loop")
+int skip_loop(struct pt_regs *ctx)
+{
+	/* prevent compiler to optimize everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	unsigned iters = 0, sum = 0;
+
+	/* we should never enter this loop */
+	while (*p & 1) {
+		iters++;
+		sum += *p;
+		p++;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:part_loop")
+int part_loop(struct pt_regs *ctx)
+{
+	/* prevent compiler to optimize everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	unsigned iters = 0, sum = 0;
+
+	/* validate verifier can derive loop termination */
+	while (*p < 5) {
+		iters++;
+		sum += *p;
+		p++;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:full_loop")
+int full_loop(struct pt_regs *ctx)
+{
+	/* prevent compiler to optimize everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+	unsigned iters = 0, sum = 0;
+
+	/* validate verifier can allow full loop as well */
+	while (i > 0) {
+		iters++;
+		sum += *p;
+		p++;
+		i--;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
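
As a closing illustration (again not part of the patches above): a hedged user-space sketch of the two conditions the new bpf_map_is_rdonly() check relies on, BPF_F_RDONLY_PROG at map creation plus a subsequent BPF_MAP_FREEZE so that map->frozen is set. The helper name and the stored value are made up; in practice libbpf performs equivalent steps itself for .rodata maps, and known-scalar tracking additionally requires a single-element array map accessed via direct value loads from the program.

/* Hypothetical sketch: create a single-element array map that is read-only
 * from the BPF side, fill it from user space, then freeze it. */
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int setup_rdonly_map(void)
{
	__u32 key = 0, value = 42;	/* example constant the verifier could see */
	int fd;

	/* BPF_F_RDONLY_PROG: read-only from the program side */
	fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
			    1, BPF_F_RDONLY_PROG);
	if (fd < 0)
		return fd;

	/* still writable from user space until frozen */
	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY) ||
	    bpf_map_freeze(fd) /* BPF_MAP_FREEZE sets map->frozen */) {
		close(fd);
		return -1;
	}
	return fd;
}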