Diffstat (limited to 'tools')
-rwxr-xr-x | tools/hv/bondvf.sh                               |  33
-rw-r--r-- | tools/include/uapi/linux/bpf.h                   |  60
-rw-r--r-- | tools/lib/bpf/bpf.c                              |  68
-rw-r--r-- | tools/lib/bpf/bpf.h                              |   5
-rw-r--r-- | tools/net/bpf_jit_disasm.c                       |  37
-rw-r--r-- | tools/testing/selftests/bpf/Makefile             |   3
-rw-r--r-- | tools/testing/selftests/bpf/test_align.c         |   7
-rw-r--r-- | tools/testing/selftests/bpf/test_maps.c          |  50
-rw-r--r-- | tools/testing/selftests/bpf/test_obj_id.c        |  35
-rw-r--r-- | tools/testing/selftests/bpf/test_pkt_md_access.c |  35
-rw-r--r-- | tools/testing/selftests/bpf/test_progs.c         | 225
-rw-r--r-- | tools/testing/selftests/bpf/test_verifier.c      | 202
12 files changed, 689 insertions, 71 deletions
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh index d85968cb1bf2..89b25068cd98 100755 --- a/tools/hv/bondvf.sh +++ b/tools/hv/bondvf.sh @@ -102,15 +102,30 @@ function create_bond_cfg_redhat { } function del_eth_cfg_ubuntu { - local fn=$cfgdir/interfaces + local mainfn=$cfgdir/interfaces + local fnlist=( $mainfn ) + + local dirlist=(`awk '/^[ \t]*source/{print $2}' $mainfn`) + + local i + for i in "${dirlist[@]}" + do + fnlist+=(`ls $i 2>/dev/null`) + done + local tmpfl=$(mktemp) local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1 local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)' - awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl + local fn + for fn in "${fnlist[@]}" + do + awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" \ + $fn >$tmpfl - cp $tmpfl $fn + cp $tmpfl $fn + done rm $tmpfl } @@ -119,7 +134,6 @@ function create_eth_cfg_ubuntu { local fn=$cfgdir/interfaces del_eth_cfg_ubuntu $1 - echo $'\n'auto $1 >>$fn echo iface $1 inet manual >>$fn echo bond-master $2 >>$fn @@ -128,7 +142,10 @@ function create_eth_cfg_ubuntu { function create_eth_cfg_pri_ubuntu { local fn=$cfgdir/interfaces - create_eth_cfg_ubuntu $1 $2 + del_eth_cfg_ubuntu $1 + echo $'\n'allow-hotplug $1 >>$fn + echo iface $1 inet manual >>$fn + echo bond-master $2 >>$fn echo bond-primary $1 >>$fn } @@ -153,7 +170,11 @@ function create_eth_cfg_suse { } function create_eth_cfg_pri_suse { - create_eth_cfg_suse $1 + local fn=$cfgdir/ifcfg-$1 + + rm -f $fn + echo BOOTPROTO=none >>$fn + echo STARTMODE=hotplug >>$fn } function create_bond_cfg_suse { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 94dfa9def355..f94b48b168dc 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -82,6 +82,11 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_GET_NEXT_ID, + BPF_MAP_GET_NEXT_ID, + BPF_PROG_GET_FD_BY_ID, + BPF_MAP_GET_FD_BY_ID, + BPF_OBJ_GET_INFO_BY_FD, }; enum bpf_map_type { @@ -209,6 +214,21 @@ union bpf_attr { __u32 repeat; __u32 duration; } test; + + struct { /* anonymous struct used by BPF_*_GET_*_ID */ + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + }; + __u32 next_id; + }; + + struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ + __u32 bpf_fd; + __u32 info_len; + __aligned_u64 info; + } info; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -313,8 +333,11 @@ union bpf_attr { * @flags: room for future extensions * Return: 0 on success or negative error * - * u64 bpf_perf_event_read(&map, index) - * Return: Number events read or error code + * u64 bpf_perf_event_read(map, flags) + * read perf event counter value + * @map: pointer to perf_event_array map + * @flags: index of event in the map or bitmask flags + * Return: value of perf event counter read or error code * * int bpf_redirect(ifindex, flags) * redirect to another netdev @@ -328,11 +351,11 @@ union bpf_attr { * @skb: pointer to skb * Return: realm if != 0 * - * int bpf_perf_event_output(ctx, map, index, data, size) + * int bpf_perf_event_output(ctx, map, flags, data, size) * output perf raw sample * @ctx: struct pt_regs* * @map: pointer to perf_event_array map - * @index: index of event in the map + * @flags: index of event in the map or bitmask flags * @data: data on stack to be output as raw data * @size: size of data * Return: 0 on success or negative error @@ -490,6 +513,11 @@ union bpf_attr { * Get the owner uid of the socket stored inside sk_buff. 
* @skb: pointer to skb * Return: uid of the socket owner on success or overflowuid if failed. + * + * u32 bpf_set_hash(skb, hash) + * Set full skb->hash. + * @skb: pointer to skb + * @hash: hash to set */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -539,7 +567,8 @@ union bpf_attr { FN(xdp_adjust_head), \ FN(probe_read_str), \ FN(get_socket_cookie), \ - FN(get_socket_uid), + FN(get_socket_uid), \ + FN(set_hash), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -670,4 +699,25 @@ struct xdp_md { __u32 data_end; }; +#define BPF_TAG_SIZE 8 + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[BPF_TAG_SIZE]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __aligned_u64 jited_prog_insns; + __aligned_u64 xlated_prog_insns; +} __attribute__((aligned(8))); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; +} __attribute__((aligned(8))); + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 6e178987af8e..7e0405e1651d 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -257,3 +257,71 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, *duration = attr.test.duration; return ret; } + +int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) +{ + union bpf_attr attr; + int err; + + bzero(&attr, sizeof(attr)); + attr.start_id = start_id; + + err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)); + if (!err) + *next_id = attr.next_id; + + return err; +} + +int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) +{ + union bpf_attr attr; + int err; + + bzero(&attr, sizeof(attr)); + attr.start_id = start_id; + + err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr)); + if (!err) + *next_id = attr.next_id; + + return err; +} + +int bpf_prog_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + + bzero(&attr, sizeof(attr)); + attr.prog_id = id; + + return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); +} + +int bpf_map_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + + bzero(&attr, sizeof(attr)); + attr.map_id = id; + + return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); +} + +int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len) +{ + union bpf_attr attr; + int err; + + bzero(&attr, sizeof(attr)); + bzero(info, *info_len); + attr.info.bpf_fd = prog_fd; + attr.info.info_len = *info_len; + attr.info.info = ptr_to_u64(info); + + err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); + if (!err) + *info_len = attr.info.info_len; + + return err; +} diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 972bd8333eb7..16de44a14b48 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -54,5 +54,10 @@ int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration); +int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); +int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); +int bpf_prog_get_fd_by_id(__u32 id); +int bpf_map_get_fd_by_id(__u32 id); +int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len); #endif diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index ad572e6cdbd0..422d9abd666a 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -159,8 +159,8 @@ static void put_log_buff(char *buff) free(buff); } -static unsigned int 
get_last_jit_image(char *haystack, size_t hlen, - uint8_t *image, size_t ilen) +static uint8_t *get_last_jit_image(char *haystack, size_t hlen, + unsigned int *ilen) { char *ptr, *pptr, *tmp; off_t off = 0; @@ -168,9 +168,10 @@ static unsigned int get_last_jit_image(char *haystack, size_t hlen, regmatch_t pmatch[1]; unsigned long base; regex_t regex; + uint8_t *image; if (hlen == 0) - return 0; + return NULL; ret = regcomp(®ex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ " "pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED); @@ -194,11 +195,22 @@ static unsigned int get_last_jit_image(char *haystack, size_t hlen, &flen, &proglen, &pass, &base); if (ret != 4) { regfree(®ex); - return 0; + return NULL; + } + if (proglen > 1000000) { + printf("proglen of %d too big, stopping\n", proglen); + return NULL; } + image = malloc(proglen); + if (!image) { + printf("Out of memory\n"); + return NULL; + } + memset(image, 0, proglen); + tmp = ptr = haystack + off; - while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) { + while ((ptr = strtok(tmp, "\n")) != NULL && ulen < proglen) { tmp = NULL; if (!strstr(ptr, "JIT code")) continue; @@ -208,10 +220,12 @@ static unsigned int get_last_jit_image(char *haystack, size_t hlen, ptr = pptr; do { image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16); - if (ptr == pptr || ulen >= ilen) { + if (ptr == pptr) { ulen--; break; } + if (ulen >= proglen) + break; ptr = pptr; } while (1); } @@ -222,7 +236,8 @@ static unsigned int get_last_jit_image(char *haystack, size_t hlen, printf("%lx + <x>:\n", base); regfree(®ex); - return ulen; + *ilen = ulen; + return image; } static void usage(void) @@ -237,12 +252,12 @@ static void usage(void) int main(int argc, char **argv) { unsigned int len, klen, opt, opcodes = 0; - static uint8_t image[32768]; char *kbuff, *file = NULL; char *ofile = NULL; int ofd; ssize_t nr; uint8_t *pos; + uint8_t *image = NULL; while ((opt = getopt(argc, argv, "of:O:")) != -1) { switch (opt) { @@ -262,7 +277,6 @@ int main(int argc, char **argv) } bfd_init(); - memset(image, 0, sizeof(image)); kbuff = get_log_buff(file, &klen); if (!kbuff) { @@ -270,8 +284,8 @@ int main(int argc, char **argv) return -1; } - len = get_last_jit_image(kbuff, klen, image, sizeof(image)); - if (len <= 0) { + image = get_last_jit_image(kbuff, klen, &len); + if (!image) { fprintf(stderr, "No JIT image found!\n"); goto done; } @@ -301,5 +315,6 @@ int main(int argc, char **argv) done: put_log_buff(kbuff); + free(image); return 0; } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f389b02d43a0..2ca51a8a588c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -14,7 +14,8 @@ LDLIBS += -lcap -lelf TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align -TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o +TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ + test_pkt_md_access.o TEST_PROGS := test_kmod.sh diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 9644d4e069de..bccebd935907 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -9,6 +9,8 @@ #include <stddef.h> #include <stdbool.h> +#include <sys/resource.h> + #include <linux/unistd.h> #include <linux/filter.h> #include <linux/bpf_perf_event.h> @@ -426,12 +428,15 @@ static int do_test(unsigned int from, unsigned int to) } 
printf("Results: %d pass %d fail\n", all_pass, all_fail); - return 0; + return all_fail ? EXIT_FAILURE : EXIT_SUCCESS; } int main(int argc, char **argv) { unsigned int from = 0, to = ARRAY_SIZE(tests); + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; + + setrlimit(RLIMIT_MEMLOCK, &rinf); if (argc == 3) { unsigned int l = atoi(argv[argc - 2]); diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 93314524de0d..79601c81e169 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -239,6 +239,54 @@ static void test_hashmap_percpu(int task, void *data) close(fd); } +static void test_hashmap_walk(int task, void *data) +{ + int fd, i, max_entries = 100000; + long long key, value, next_key; + bool next_key_valid = true; + + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + max_entries, map_flags); + if (fd < 0) { + printf("Failed to create hashmap '%s'!\n", strerror(errno)); + exit(1); + } + + for (i = 0; i < max_entries; i++) { + key = i; value = key; + assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0); + } + + for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key, + &next_key) == 0; i++) { + key = next_key; + assert(bpf_map_lookup_elem(fd, &key, &value) == 0); + } + + assert(i == max_entries); + + assert(bpf_map_get_next_key(fd, NULL, &key) == 0); + for (i = 0; next_key_valid; i++) { + next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0; + assert(bpf_map_lookup_elem(fd, &key, &value) == 0); + value++; + assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); + key = next_key; + } + + assert(i == max_entries); + + for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key, + &next_key) == 0; i++) { + key = next_key; + assert(bpf_map_lookup_elem(fd, &key, &value) == 0); + assert(value - 1 == key); + } + + assert(i == max_entries); + close(fd); +} + static void test_arraymap(int task, void *data) { int key, next_key, fd; @@ -464,6 +512,7 @@ static void test_map_stress(void) run_parallel(100, test_hashmap, NULL); run_parallel(100, test_hashmap_percpu, NULL); run_parallel(100, test_hashmap_sizes, NULL); + run_parallel(100, test_hashmap_walk, NULL); run_parallel(100, test_arraymap, NULL); run_parallel(100, test_arraymap_percpu, NULL); @@ -549,6 +598,7 @@ static void run_all_tests(void) { test_hashmap(0, NULL); test_hashmap_percpu(0, NULL); + test_hashmap_walk(0, NULL); test_arraymap(0, NULL); test_arraymap_percpu(0, NULL); diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/test_obj_id.c new file mode 100644 index 000000000000..880d2963b472 --- /dev/null +++ b/tools/testing/selftests/bpf/test_obj_id.c @@ -0,0 +1,35 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include <stddef.h> +#include <linux/bpf.h> +#include <linux/pkt_cls.h> +#include "bpf_helpers.h" + +/* It is a dumb bpf program such that it must have no + * issue to be loaded since testing the verifier is + * not the focus here. 
+ */ + +int _version SEC("version") = 1; + +struct bpf_map_def SEC("maps") test_map_id = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = 1, +}; + +SEC("test_obj_id_dummy") +int test_obj_id(struct __sk_buff *skb) +{ + __u32 key = 0; + __u64 *value; + + value = bpf_map_lookup_elem(&test_map_id, &key); + + return TC_ACT_OK; +} diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c new file mode 100644 index 000000000000..71729d47eb85 --- /dev/null +++ b/tools/testing/selftests/bpf/test_pkt_md_access.c @@ -0,0 +1,35 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include <stddef.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/pkt_cls.h> +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +#define TEST_FIELD(TYPE, FIELD, MASK) \ + { \ + TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ + if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ + return TC_ACT_SHOT; \ + } + +SEC("test1") +int process(struct __sk_buff *skb) +{ + TEST_FIELD(__u8, len, 0xFF); + TEST_FIELD(__u16, len, 0xFFFF); + TEST_FIELD(__u32, len, 0xFFFFFFFF); + TEST_FIELD(__u16, protocol, 0xFFFF); + TEST_FIELD(__u32, protocol, 0xFFFFFFFF); + TEST_FIELD(__u8, hash, 0xFF); + TEST_FIELD(__u16, hash, 0xFFFF); + TEST_FIELD(__u32, hash, 0xFFFFFFFF); + + return TC_ACT_OK; +} diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index b59f5ed4ae40..5855cd3d3d45 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -22,6 +22,8 @@ typedef __u16 __sum16; #include <sys/wait.h> #include <sys/resource.h> +#include <sys/types.h> +#include <fcntl.h> #include <linux/bpf.h> #include <linux/err.h> @@ -70,6 +72,7 @@ static struct { pass_cnt++; \ printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ } \ + __ret; \ }) static int bpf_prog_load(const char *file, enum bpf_prog_type type, @@ -283,6 +286,224 @@ static void test_tcp_estats(void) bpf_object__close(obj); } +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + +static void test_bpf_obj_id(void) +{ + const __u64 array_magic_value = 0xfaceb00c; + const __u32 array_key = 0; + const int nr_iters = 2; + const char *file = "./test_obj_id.o"; + const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; + + struct bpf_object *objs[nr_iters]; + int prog_fds[nr_iters], map_fds[nr_iters]; + /* +1 to test for the info_len returned by kernel */ + struct bpf_prog_info prog_infos[nr_iters + 1]; + struct bpf_map_info map_infos[nr_iters + 1]; + char jited_insns[128], xlated_insns[128]; + __u32 i, next_id, info_len, nr_id_found, duration = 0; + int sysctl_fd, jit_enabled = 0, err = 0; + __u64 array_value; + + sysctl_fd = open(jit_sysctl, 0, O_RDONLY); + if (sysctl_fd != -1) { + char tmpc; + + if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1) + jit_enabled = (tmpc != '0'); + close(sysctl_fd); + } + + err = bpf_prog_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); + + err = bpf_map_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); + + for (i = 0; i < nr_iters; i++) + objs[i] = NULL; + + /* Check 
bpf_obj_get_info_by_fd() */ + for (i = 0; i < nr_iters; i++) { + err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, + &objs[i], &prog_fds[i]); + /* test_obj_id.o is a dumb prog. It should never fail + * to load. + */ + assert(!err); + + /* Check getting prog info */ + info_len = sizeof(struct bpf_prog_info) * 2; + prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); + prog_infos[i].jited_prog_len = sizeof(jited_insns); + prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); + prog_infos[i].xlated_prog_len = sizeof(xlated_insns); + err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], + &info_len); + if (CHECK(err || + prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || + info_len != sizeof(struct bpf_prog_info) || + (jit_enabled && !prog_infos[i].jited_prog_len) || + !prog_infos[i].xlated_prog_len, + "get-prog-info(fd)", + "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u\n", + err, errno, i, + prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, + info_len, sizeof(struct bpf_prog_info), + jit_enabled, + prog_infos[i].jited_prog_len, + prog_infos[i].xlated_prog_len)) + goto done; + + map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); + assert(map_fds[i] >= 0); + err = bpf_map_update_elem(map_fds[i], &array_key, + &array_magic_value, 0); + assert(!err); + + /* Check getting map info */ + info_len = sizeof(struct bpf_map_info) * 2; + err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], + &info_len); + if (CHECK(err || + map_infos[i].type != BPF_MAP_TYPE_ARRAY || + map_infos[i].key_size != sizeof(__u32) || + map_infos[i].value_size != sizeof(__u64) || + map_infos[i].max_entries != 1 || + map_infos[i].map_flags != 0 || + info_len != sizeof(struct bpf_map_info), + "get-map-info(fd)", + "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n", + err, errno, + map_infos[i].type, BPF_MAP_TYPE_ARRAY, + info_len, sizeof(struct bpf_map_info), + map_infos[i].key_size, + map_infos[i].value_size, + map_infos[i].max_entries, + map_infos[i].map_flags)) + goto done; + } + + /* Check bpf_prog_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_prog_get_next_id(next_id, &next_id)) { + struct bpf_prog_info prog_info; + int prog_fd; + + info_len = sizeof(prog_info); + + prog_fd = bpf_prog_get_fd_by_id(next_id); + if (prog_fd < 0 && errno == ENOENT) + /* The bpf_prog is in the dead row */ + continue; + if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", + "prog_fd %d next_id %d errno %d\n", + prog_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (prog_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + CHECK(err || info_len != sizeof(struct bpf_prog_info) || + memcmp(&prog_info, &prog_infos[i], info_len), + "get-prog-info(next_id->fd)", + "err %d errno %d info_len %u(%lu) memcmp %d\n", + err, errno, info_len, sizeof(struct bpf_prog_info), + memcmp(&prog_info, &prog_infos[i], info_len)); + + close(prog_fd); + } + CHECK(nr_id_found != nr_iters, + "check total prog id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + + /* Check bpf_map_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_map_get_next_id(next_id, &next_id)) { + struct bpf_map_info map_info; + int map_fd; + + info_len = sizeof(map_info); + + map_fd = bpf_map_get_fd_by_id(next_id); + if (map_fd < 0 && errno == ENOENT) + /* The bpf_map is in the dead row */ 
+ continue; + if (CHECK(map_fd < 0, "get-map-fd(next_id)", + "map_fd %d next_id %u errno %d\n", + map_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (map_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); + assert(!err); + + err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + CHECK(err || info_len != sizeof(struct bpf_map_info) || + memcmp(&map_info, &map_infos[i], info_len) || + array_value != array_magic_value, + "check get-map-info(next_id->fd)", + "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", + err, errno, info_len, sizeof(struct bpf_map_info), + memcmp(&map_info, &map_infos[i], info_len), + array_value, array_magic_value); + + close(map_fd); + } + CHECK(nr_id_found != nr_iters, + "check total map id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + +done: + for (i = 0; i < nr_iters; i++) + bpf_object__close(objs[i]); +} + +static void test_pkt_md_access(void) +{ + const char *file = "./test_pkt_md_access.o"; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) + return; + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + bpf_object__close(obj); +} + int main(void) { struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; @@ -293,7 +514,9 @@ int main(void) test_xdp(); test_l4lb(); test_tcp_estats(); + test_bpf_obj_id(); + test_pkt_md_access(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); - return 0; + return error_cnt ? 
EXIT_FAILURE : EXIT_SUCCESS; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index cabb19b1e371..c0af0195432f 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -1073,44 +1073,75 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { - "check cb access: byte, oob 1", + "__sk_buff->hash, offset 0, byte store not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 4), + offsetof(struct __sk_buff, hash)), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: byte, oob 2", + "__sk_buff->tc_index, offset 3, byte store not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) - 1), + offsetof(struct __sk_buff, tc_index) + 3), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: byte, oob 3", + "check skb->hash byte load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 4), + offsetof(struct __sk_buff, hash)), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 3), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check skb->hash byte load not permitted 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 1), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: byte, oob 4", + "check skb->hash byte load not permitted 2", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) - 1), + offsetof(struct __sk_buff, hash) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check skb->hash byte load not permitted 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 3), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash)), +#endif BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", @@ -1188,44 +1219,53 @@ static struct bpf_test tests[] = { .result = REJECT, }, { - "check cb access: half, oob 1", + "check __sk_buff->hash, offset 0, half store not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 4), + offsetof(struct __sk_buff, hash)), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: half, oob 2", + "check __sk_buff->tc_index, offset 2, half store not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) - 2), + offsetof(struct __sk_buff, tc_index) + 2), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: half, oob 3", + "check skb->hash half load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 4), + offsetof(struct __sk_buff, hash)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), +#endif BPF_EXIT_INSN(), }, - 
.errstr = "invalid bpf_context access", - .result = REJECT, + .result = ACCEPT, }, { - "check cb access: half, oob 4", + "check skb->hash half load not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), +#else BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) - 2), + offsetof(struct __sk_buff, hash)), +#endif BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", @@ -1368,28 +1408,6 @@ static struct bpf_test tests[] = { "check cb access: double, oob 2", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 8), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check cb access: double, oob 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) - 8), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check cb access: double, oob 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4])), BPF_EXIT_INSN(), @@ -1398,22 +1416,22 @@ static struct bpf_test tests[] = { .result = REJECT, }, { - "check cb access: double, oob 5", + "check __sk_buff->ifindex dw store not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, ifindex)), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", .result = REJECT, }, { - "check cb access: double, oob 6", + "check __sk_buff->ifindex dw load not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) - 8), + offsetof(struct __sk_buff, ifindex)), BPF_EXIT_INSN(), }, .errstr = "invalid bpf_context access", @@ -5169,6 +5187,98 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, }, + { + "check bpf_perf_event_data->sample_period byte load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 7), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, + }, + { + "check bpf_perf_event_data->sample_period half load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 6), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, + }, + { + "check bpf_perf_event_data->sample_period word load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 4), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, + }, + { + "check bpf_perf_event_data->sample_period dword load permitted", + .insns = { + 
BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, + }, + { + "check skb->data half load not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data) + 2), +#endif + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + }, + { + "check skb->tc_classid half load not permitted for lwt prog", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#ifdef __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid) + 2), +#endif + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, }; static int probe_filter_length(const struct bpf_insn *fp) @@ -5418,7 +5528,7 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) } printf("Summary: %d PASSED, %d FAILED\n", passes, errors); - return errors ? -errors : 0; + return errors ? EXIT_FAILURE : EXIT_SUCCESS; } int main(int argc, char **argv) |
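
Note: the following user-space sketch is not part of the patch. It is a minimal illustration of how the object-ID interface introduced above (BPF_PROG_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD and their tools/lib/bpf wrappers) could be driven. It assumes "bpf.h" resolves to tools/lib/bpf/bpf.h at build time and that the caller has sufficient privileges (these commands are typically CAP_SYS_ADMIN-only).

/*
 * Illustrative only, not part of the patch: walk all loaded BPF programs
 * through the new GET_NEXT_ID / GET_FD_BY_ID / OBJ_GET_INFO_BY_FD commands
 * using the tools/lib/bpf wrappers added above.
 */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include "bpf.h"	/* tools/lib/bpf wrappers: bpf_prog_get_next_id() etc. */

int main(void)
{
	__u32 id = 0;

	while (!bpf_prog_get_next_id(id, &id)) {
		struct bpf_prog_info info;
		__u32 info_len = sizeof(info);
		int fd;

		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			/* the program may have been unloaded between the two calls */
			if (errno == ENOENT)
				continue;
			fprintf(stderr, "get fd by id %u: %s\n", id, strerror(errno));
			return 1;
		}

		/* bpf_obj_get_info_by_fd() zeroes info before the syscall */
		memset(&info, 0, sizeof(info));
		if (!bpf_obj_get_info_by_fd(fd, &info, &info_len))
			printf("prog id %u type %u jited %u bytes xlated %u bytes\n",
			       info.id, info.type,
			       info.jited_prog_len, info.xlated_prog_len);

		close(fd);
	}

	return 0;
}

The same pattern applies to maps via bpf_map_get_next_id()/bpf_map_get_fd_by_id() together with struct bpf_map_info, which is how test_bpf_obj_id() in test_progs.c above exercises the interface.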