author		Daniel Borkmann <daniel@iogearbox.net>	2019-04-23 19:36:35 +0300
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-04-23 19:36:36 +0300
commit		2aad32613c353b1e05da5994324bc5f20d0dd55a (patch)
tree		da9f76ad09a765c5818de87d3b4e5401725ed49d /tools
parent		7e6e185c74dd8a8dc539300c079adc6bc27045d6 (diff)
parent		02ee0658362d3713421851bb7487af77a4098bb5 (diff)
Merge branch 'bpf-eth-get-headlen'
Stanislav Fomichev says:
====================
Currently, when eth_get_headlen calls the flow dissector, it doesn't pass any
skb. Because we use the passed skb to look up the associated networking
namespace and find out whether a BPF program is attached, we always fall
back to the C-based flow dissector in this case.
The goal of this patch series is to add a new networking namespace argument
to eth_get_headlen and to make BPF flow dissector programs able to work
in the skb-less case.
The series goes like this:
* use a new kernel context (struct bpf_flow_dissector) for flow dissector
  programs; this makes it easy to distinguish between the skb and no-skb
  cases and supports calling the BPF flow dissector on a chunk of raw
  data (see the first sketch after this list)
* convert BPF_PROG_TEST_RUN to use raw data
* plumb the network namespace into __skb_flow_dissect from all callers
* handle the no-skb case in __skb_flow_dissect
* update eth_get_headlen to take an extra argument for the namespace
  lookup and convert all existing users (see the second sketch after
  this list)
* add a selftest to make sure bpf_skb_load_bytes is not allowed in
  the no-skb mode
* extend test_progs to exercise skb-less flow dissection as well
* stop adjusting nhoff/thoff by ETH_HLEN in BPF_PROG_TEST_RUN
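
To make the first bullet concrete, here is a rough C sketch of the
xdp_buff-like context the series introduces; the field layout is
illustrative rather than the exact upstream definition:

/* Sketch of the new flow dissector context (struct bpf_flow_dissector).
 * In the skb case, data/data_end can be derived from the skb; in the
 * skb-less (eth_get_headlen) case, skb is NULL and the program walks
 * the raw data window directly.
 */
struct bpf_flow_dissector {
	struct bpf_flow_keys	*flow_keys;
	const struct sk_buff	*skb;	/* NULL for raw-data dissection */
	void			*data;
	void			*data_end;
};

Either way, programs keep seeing the usual struct __sk_buff view; their
field accesses are rewritten against this context, which is presumably
why only a whitelisted subset of __sk_buff fields is allowed (see the
v2 notes below).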
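For the eth_get_headlen bullet, a hypothetical driver call site shows the
shape of the change. Per the v6 note below, the extra argument ended up
being a net device rather than a namespace; "netdev", "data" and
"max_len" are made-up names for illustration, not identifiers from this
series:

/* Hypothetical driver RX path illustrating the API change. */
static u32 rx_estimate_headlen(struct net_device *netdev,
			       void *data, u32 max_len)
{
	/* Before the series: eth_get_headlen(data, max_len).
	 * After: the device is passed along so the flow dissector can
	 * resolve its netns and run an attached BPF program even
	 * without an skb.
	 */
	return eth_get_headlen(netdev, data, max_len);
}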
v6:
* more suggestions by Alexei:
* eth_get_headlen now takes a net device, not a net namespace
* test the skb-less case via the tun driver's eth_get_headlen path
* fix return errors in bpf_flow_load
* don't adjust nhoff/thoff by ETH_HLEN
v5:
* API changes have been submitted via bpf/stable tree
v4:
* prohibit access to vlan fields as well (otherwise inconsistent between
the skb and skb-less cases)
* drop the unneeded check for skb->vlan_present in bpf_flow.c
v3:
* new kernel xdp_buff-like context, per Alexei's suggestion
* drop skb_net helper
* properly clamp flow_keys->nhoff
v2:
* moved the temporary skb from the stack into percpu storage (avoids a
memset of ~200 bytes per packet)
* tightened access to __sk_buff fields from flow dissector programs to
avoid touching shinfo (whitelisting only the relevant fields)
* addressed suggestions from Willem
====================
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'tools')
5 files changed, 220 insertions(+), 46 deletions(-)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 7136ab9ffa73..3fd83b9dc1bf 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -26,7 +26,7 @@ static void load_and_attach_program(void)
 	struct bpf_object *obj;
 
 	ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
-			    cfg_map_name, &prog_fd);
+			    cfg_map_name, NULL, &prog_fd, NULL);
 	if (ret)
 		error(1, 0, "bpf_flow_load %s", cfg_path_name);
 
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index 41dd6959feb0..daeaeb518894 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -9,10 +9,12 @@
 static inline int bpf_flow_load(struct bpf_object **obj,
 				const char *path,
 				const char *section_name,
 				const char *map_name,
-				int *prog_fd)
+				const char *keys_map_name,
+				int *prog_fd,
+				int *keys_fd)
 {
 	struct bpf_program *prog, *main_prog;
-	struct bpf_map *prog_array;
+	struct bpf_map *prog_array, *keys;
 	int prog_array_fd;
 	int ret, fd, i;
@@ -23,19 +25,29 @@ static inline int bpf_flow_load(struct bpf_object **obj,
 
 	main_prog = bpf_object__find_program_by_title(*obj, section_name);
 	if (!main_prog)
-		return ret;
+		return -1;
 
 	*prog_fd = bpf_program__fd(main_prog);
 	if (*prog_fd < 0)
-		return ret;
+		return -1;
 
 	prog_array = bpf_object__find_map_by_name(*obj, map_name);
 	if (!prog_array)
-		return ret;
+		return -1;
 
 	prog_array_fd = bpf_map__fd(prog_array);
 	if (prog_array_fd < 0)
-		return ret;
+		return -1;
+
+	if (keys_map_name && keys_fd) {
+		keys = bpf_object__find_map_by_name(*obj, keys_map_name);
+		if (!keys)
+			return -1;
+
+		*keys_fd = bpf_map__fd(keys);
+		if (*keys_fd < 0)
+			return -1;
+	}
 
 	i = 0;
 	bpf_object__for_each_program(prog, *obj) {
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 126319f9a97c..8b54adfd6264 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include <error.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
 
 #define CHECK_FLOW_KEYS(desc, got, expected)				\
 	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
@@ -79,8 +82,8 @@ struct test tests[] = {
 			.tcp.doff = 5,
 		},
 		.keys = {
-			.nhoff = 0,
-			.thoff = sizeof(struct iphdr),
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct iphdr),
 			.addr_proto = ETH_P_IP,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IP),
@@ -95,8 +98,8 @@ struct test tests[] = {
 			.tcp.doff = 5,
 		},
 		.keys = {
-			.nhoff = 0,
-			.thoff = sizeof(struct ipv6hdr),
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
 			.addr_proto = ETH_P_IPV6,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
@@ -113,8 +116,8 @@ struct test tests[] = {
 			.tcp.doff = 5,
 		},
 		.keys = {
-			.nhoff = VLAN_HLEN,
-			.thoff = VLAN_HLEN + sizeof(struct iphdr),
+			.nhoff = ETH_HLEN + VLAN_HLEN,
+			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
 			.addr_proto = ETH_P_IP,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IP),
@@ -131,8 +134,9 @@ struct test tests[] = {
 			.tcp.doff = 5,
 		},
 		.keys = {
-			.nhoff = VLAN_HLEN * 2,
-			.thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
+			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
+				 sizeof(struct ipv6hdr),
 			.addr_proto = ETH_P_IPV6,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
@@ -140,13 +144,73 @@ struct test tests[] = {
 	},
 };
 
+static int create_tap(const char *ifname)
+{
+	struct ifreq ifr = {
+		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
+	};
+	int fd, ret;
+
+	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+	fd = open("/dev/net/tun", O_RDWR);
+	if (fd < 0)
+		return -1;
+
+	ret = ioctl(fd, TUNSETIFF, &ifr);
+	if (ret)
+		return -1;
+
+	return fd;
+}
+
+static int tx_tap(int fd, void *pkt, size_t len)
+{
+	struct iovec iov[] = {
+		{
+			.iov_len = len,
+			.iov_base = pkt,
+		},
+	};
+	return writev(fd, iov, ARRAY_SIZE(iov));
+}
+
+static int ifup(const char *ifname)
+{
+	struct ifreq ifr = {};
+	int sk, ret;
+
+	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+	sk = socket(PF_INET, SOCK_DGRAM, 0);
+	if (sk < 0)
+		return -1;
+
+	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
+	if (ret) {
+		close(sk);
+		return -1;
+	}
+
+	ifr.ifr_flags |= IFF_UP;
+	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
+	if (ret) {
+		close(sk);
+		return -1;
+	}
+
+	close(sk);
+	return 0;
+}
+
 void test_flow_dissector(void)
 {
+	int i, err, prog_fd, keys_fd = -1, tap_fd;
 	struct bpf_object *obj;
-	int i, err, prog_fd;
+	__u32 duration = 0;
 
 	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
-			    "jmp_table", &prog_fd);
+			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
 	if (err) {
 		error_cnt++;
 		return;
@@ -171,5 +235,34 @@ void test_flow_dissector(void)
 		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
 	}
 
+	/* Do the same tests but for skb-less flow dissector.
+	 * We use a known path in the net/tun driver that calls
+	 * eth_get_headlen and we manually export bpf_flow_keys
+	 * via BPF map in this case.
+	 */
+
+	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+	CHECK(err, "bpf_prog_attach", "err %d errno %d", err, errno);
+
+	tap_fd = create_tap("tap0");
+	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d", tap_fd, errno);
+	err = ifup("tap0");
+	CHECK(err, "ifup", "err %d errno %d", err, errno);
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		struct bpf_flow_keys flow_keys = {};
+		struct bpf_prog_test_run_attr tattr = {};
+		__u32 key = 0;
+
+		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
+		CHECK(err < 0, "tx_tap", "err %d errno %d", err, errno);
+
+		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
+		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+
+		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
+		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+	}
+
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
new file mode 100644
index 000000000000..dc5ef155ec28
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_flow_dissector_load_bytes(void)
+{
+	struct bpf_flow_keys flow_keys;
+	__u32 duration = 0, retval, size;
+	struct bpf_insn prog[] = {
+		// BPF_REG_1 - 1st argument: context
+		// BPF_REG_2 - 2nd argument: offset, start at first byte
+		BPF_MOV64_IMM(BPF_REG_2, 0),
+		// BPF_REG_3 - 3rd argument: destination, reserve byte on stack
+		BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1),
+		// BPF_REG_4 - 4th argument: copy one byte
+		BPF_MOV64_IMM(BPF_REG_4, 1),
+		// bpf_skb_load_bytes(ctx, sizeof(pkt_v4), ptr, 1)
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			     BPF_FUNC_skb_load_bytes),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+		// if (ret == 0) return BPF_DROP (2)
+		BPF_MOV64_IMM(BPF_REG_0, BPF_DROP),
+		BPF_EXIT_INSN(),
+		// if (ret != 0) return BPF_OK (0)
+		BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
+		BPF_EXIT_INSN(),
+	};
+	int fd, err;
+
+	/* make sure bpf_skb_load_bytes is not allowed from skb-less context
+	 */
+	fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+			      ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+	CHECK(fd < 0,
+	      "flow_dissector-bpf_skb_load_bytes-load",
+	      "fd %d errno %d\n",
+	      fd, errno);
+
+	err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4),
+				&flow_keys, &size, &retval, &duration);
+	CHECK(size != sizeof(flow_keys) || err || retval != 1,
+	      "flow_dissector-bpf_skb_load_bytes",
+	      "err %d errno %d retval %d duration %d size %u/%zu\n",
+	      err, errno, retval, duration, size, sizeof(flow_keys));
+
+	if (fd >= -1)
+		close(fd);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 75b17cada539..81ad9a0b29d0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -64,6 +64,25 @@ struct bpf_map_def SEC("maps") jmp_table = {
 	.max_entries = 8
 };
 
+struct bpf_map_def SEC("maps") last_dissection = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct bpf_flow_keys),
+	.max_entries = 1,
+};
+
+static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
+					    int ret)
+{
+	struct bpf_flow_keys *val;
+	__u32 key = 0;
+
+	val = bpf_map_lookup_elem(&last_dissection, &key);
+	if (val)
+		memcpy(val, keys, sizeof(*val));
+	return ret;
+}
+
 static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
							  __u16 hdr_size,
							  void *buffer)
@@ -109,10 +128,10 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 		break;
 	default:
 		/* Protocol not supported */
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
 SEC("flow_dissector")
@@ -139,8 +158,8 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_ICMP:
 		icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
 		if (!icmp)
-			return BPF_DROP;
-		return BPF_OK;
+			return export_flow_keys(keys, BPF_DROP);
+		return export_flow_keys(keys, BPF_OK);
 	case IPPROTO_IPIP:
 		keys->is_encap = true;
 		return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
@@ -150,11 +169,11 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_GRE:
 		gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
 		if (!gre)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (bpf_htons(gre->flags & GRE_VERSION))
 			/* Only inspect standard GRE packets with version 0 */
-			return BPF_OK;
+			return export_flow_keys(keys, BPF_OK);
 
 		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
 		if (GRE_IS_CSUM(gre->flags))
@@ -170,7 +189,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
							  &_eth);
 			if (!eth)
-				return BPF_DROP;
+				return export_flow_keys(keys, BPF_DROP);
 
 			keys->thoff += sizeof(*eth);
 
@@ -181,31 +200,31 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_TCP:
 		tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
 		if (!tcp)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (tcp->doff < 5)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->sport = tcp->source;
 		keys->dport = tcp->dest;
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 	case IPPROTO_UDP:
 	case IPPROTO_UDPLITE:
 		udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
 		if (!udp)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->sport = udp->source;
 		keys->dport = udp->dest;
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 	default:
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
@@ -225,7 +244,7 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
 		return parse_ip_proto(skb, nexthdr);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
 PROG(IP)(struct __sk_buff *skb)
@@ -238,11 +257,11 @@ PROG(IP)(struct __sk_buff *skb)
 
 	iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
 	if (!iph)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	/* IP header cannot be smaller than 20 bytes */
 	if (iph->ihl < 5)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->addr_proto = ETH_P_IP;
 	keys->ipv4_src = iph->saddr;
@@ -250,7 +269,7 @@ PROG(IP)(struct __sk_buff *skb)
 
 	keys->thoff += iph->ihl << 2;
 	if (data + keys->thoff > data_end)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
 		keys->is_frag = true;
@@ -264,7 +283,7 @@ PROG(IP)(struct __sk_buff *skb)
 	}
 
 	if (done)
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 
 	return parse_ip_proto(skb, iph->protocol);
 }
@@ -276,7 +295,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 
 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
 	if (!ip6h)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->addr_proto = ETH_P_IPV6;
 	memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
@@ -288,11 +307,12 @@ PROG(IPV6)(struct __sk_buff *skb)
 
 PROG(IPV6OP)(struct __sk_buff *skb)
 {
+	struct bpf_flow_keys *keys = skb->flow_keys;
 	struct ipv6_opt_hdr *ip6h, _ip6h;
 
 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
 	if (!ip6h)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	/* hlen is in 8-octets and does not include the first 8 bytes
 	 * of the header
@@ -309,7 +329,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 
 	fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
 	if (!fragh)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->thoff += sizeof(*fragh);
 	keys->is_frag = true;
@@ -321,13 +341,14 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 
 PROG(MPLS)(struct __sk_buff *skb)
 {
+	struct bpf_flow_keys *keys = skb->flow_keys;
 	struct mpls_label *mpls, _mpls;
 
 	mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
 	if (!mpls)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
-	return BPF_OK;
+	return export_flow_keys(keys, BPF_OK);
 }
 
 PROG(VLAN)(struct __sk_buff *skb)
@@ -339,10 +360,10 @@ PROG(VLAN)(struct __sk_buff *skb)
 	if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
 		vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 		if (!vlan)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->nhoff += sizeof(*vlan);
 		keys->thoff += sizeof(*vlan);
@@ -350,14 +371,14 @@ PROG(VLAN)(struct __sk_buff *skb)
 
 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 	if (!vlan)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->nhoff += sizeof(*vlan);
 	keys->thoff += sizeof(*vlan);
 	/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
 	if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
 	    vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->n_proto = vlan->h_vlan_encapsulated_proto;
 	return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);