 -rw-r--r--  arch/x86/net/bpf_jit_comp.c                       |  15
 -rwxr-xr-x  samples/bpf/test_cgrp2_sock.sh                    |   1
 -rwxr-xr-x  samples/bpf/test_cgrp2_sock2.sh                   |   3
 -rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst  |  18
 -rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool         |  13
 -rw-r--r--  tools/bpf/bpftool/cfg.c                           | 514
 -rw-r--r--  tools/bpf/bpftool/cfg.h                           |  43
 -rw-r--r--  tools/bpf/bpftool/main.c                          | 104
 -rw-r--r--  tools/bpf/bpftool/prog.c                          | 305
 -rw-r--r--  tools/bpf/bpftool/xlated_dumper.c                 | 338
 -rw-r--r--  tools/bpf/bpftool/xlated_dumper.h                 |  64
 -rw-r--r--  tools/testing/selftests/bpf/bpf_rlimit.h          |  28
 -rw-r--r--  tools/testing/selftests/bpf/test_align.c          |   6
 -rw-r--r--  tools/testing/selftests/bpf/test_dev_cgroup.c     |   6
 -rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c        |  14
 -rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c        |   6
 -rw-r--r--  tools/testing/selftests/bpf/test_maps.c           |   7
 -rw-r--r--  tools/testing/selftests/bpf/test_progs.c          |   7
 -rw-r--r--  tools/testing/selftests/bpf/test_tag.c            |   4
 -rw-r--r--  tools/testing/selftests/bpf/test_tcpbpf_user.c    |   6
 -rw-r--r--  tools/testing/selftests/bpf/test_verifier.c       | 123
 -rw-r--r--  tools/testing/selftests/bpf/test_verifier_log.c   |   8
 22 files changed, 1259 insertions(+), 374 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index cbf94d44f3d5..eb661fff94d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -11,10 +11,10 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include <asm/cacheflush.h>
+#include <linux/bpf.h>
+
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
-#include <linux/bpf.h>
/*
* assembly code in arch/x86/net/bpf_jit.S
@@ -103,16 +103,6 @@ static int bpf_size_to_x86_bytes(int bpf_size)
#define X86_JLE 0x7E
#define X86_JG 0x7F
-static void bpf_flush_icache(void *start, void *end)
-{
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
- set_fs(old_fs);
-}
-
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
@@ -1266,7 +1256,6 @@ skip_init_addrs:
bpf_jit_dump(prog->len, proglen, pass + 1, image);
if (image) {
- bpf_flush_icache(header, image + proglen);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
} else {
diff --git a/samples/bpf/test_cgrp2_sock.sh b/samples/bpf/test_cgrp2_sock.sh
index 8ee0371a100a..9f6174236856 100755
--- a/samples/bpf/test_cgrp2_sock.sh
+++ b/samples/bpf/test_cgrp2_sock.sh
@@ -61,6 +61,7 @@ cleanup_and_exit()
[ -n "$msg" ] && echo "ERROR: $msg"
+ test_cgrp2_sock -d ${CGRP_MNT}/sockopts
ip li del cgrp2_sock
umount ${CGRP_MNT}
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
index fc4e64d00cb3..0f396a86e0cb 100755
--- a/samples/bpf/test_cgrp2_sock2.sh
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -28,6 +28,9 @@ function attach_bpf {
}
function cleanup {
+ if [ -d /tmp/cgroupv2/foo ]; then
+ test_cgrp2_sock -d /tmp/cgroupv2/foo
+ fi
ip link del veth0b
ip netns delete at_ns0
umount /tmp/cgroupv2
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index e4ceee7f2dff..67ca6c69376c 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -21,7 +21,7 @@ MAP COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
-| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
+| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
| **bpftool** **prog load** *OBJ* *FILE*
@@ -39,12 +39,18 @@ DESCRIPTION
Output will start with program ID followed by program type and
zero or more named attributes (depending on kernel version).
- **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** }]
- Dump eBPF instructions of the program from the kernel.
- If *FILE* is specified image will be written to a file,
- otherwise it will be disassembled and printed to stdout.
+ **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** }]
+ Dump eBPF instructions of the program from the kernel. By
+ default, the eBPF instructions will be disassembled and printed
+ to standard output in a human-readable format. In this case,
+ **opcodes** controls whether raw opcodes are printed as well.
- **opcodes** controls if raw opcodes will be printed.
+ If **file** is specified, the binary image will instead be
+ written to *FILE*.
+
+ If **visual** is specified, a control flow graph (CFG) will be
+ built instead, and the eBPF instructions will be presented as a
+ CFG in DOT format on standard output.
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }]
Dump jited image (host machine code) of the program.
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 08719c54a614..490811b45fa7 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -147,7 +147,7 @@ _bpftool()
# Deal with simplest keywords
case $prev in
- help|key|opcodes)
+ help|key|opcodes|visual)
return 0
;;
tag)
@@ -223,11 +223,16 @@ _bpftool()
return 0
;;
*)
- _bpftool_once_attr 'file'
+ _bpftool_once_attr 'file'
+ if _bpftool_search_list 'xlated'; then
+ COMPREPLY+=( $( compgen -W 'opcodes visual' -- \
+ "$cur" ) )
+ else
COMPREPLY+=( $( compgen -W 'opcodes' -- \
"$cur" ) )
- return 0
- ;;
+ fi
+ return 0
+ ;;
esac
;;
pin)
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
new file mode 100644
index 000000000000..f30b3a4a840b
--- /dev/null
+++ b/tools/bpf/bpftool/cfg.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cfg.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+struct cfg {
+ struct list_head funcs;
+ int func_num;
+};
+
+struct func_node {
+ struct list_head l;
+ struct list_head bbs;
+ struct bpf_insn *start;
+ struct bpf_insn *end;
+ int idx;
+ int bb_num;
+};
+
+struct bb_node {
+ struct list_head l;
+ struct list_head e_prevs;
+ struct list_head e_succs;
+ struct bpf_insn *head;
+ struct bpf_insn *tail;
+ int idx;
+};
+
+#define EDGE_FLAG_EMPTY 0x0
+#define EDGE_FLAG_FALLTHROUGH 0x1
+#define EDGE_FLAG_JUMP 0x2
+struct edge_node {
+ struct list_head l;
+ struct bb_node *src;
+ struct bb_node *dst;
+ int flags;
+};
+
+#define ENTRY_BLOCK_INDEX 0
+#define EXIT_BLOCK_INDEX 1
+#define NUM_FIXED_BLOCKS 2
+#define func_prev(func) list_prev_entry(func, l)
+#define func_next(func) list_next_entry(func, l)
+#define bb_prev(bb) list_prev_entry(bb, l)
+#define bb_next(bb) list_next_entry(bb, l)
+#define entry_bb(func) func_first_bb(func)
+#define exit_bb(func) func_last_bb(func)
+#define cfg_first_func(cfg) \
+ list_first_entry(&cfg->funcs, struct func_node, l)
+#define cfg_last_func(cfg) \
+ list_last_entry(&cfg->funcs, struct func_node, l)
+#define func_first_bb(func) \
+ list_first_entry(&func->bbs, struct bb_node, l)
+#define func_last_bb(func) \
+ list_last_entry(&func->bbs, struct bb_node, l)
+
+static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
+{
+ struct func_node *new_func, *func;
+
+ list_for_each_entry(func, &cfg->funcs, l) {
+ if (func->start == insn)
+ return func;
+ else if (func->start > insn)
+ break;
+ }
+
+ func = func_prev(func);
+ new_func = calloc(1, sizeof(*new_func));
+ if (!new_func) {
+ p_err("OOM when allocating FUNC node");
+ return NULL;
+ }
+ new_func->start = insn;
+ new_func->idx = cfg->func_num;
+ list_add(&new_func->l, &func->l);
+ cfg->func_num++;
+
+ return new_func;
+}
+
+static struct bb_node *func_append_bb(struct func_node *func,
+ struct bpf_insn *insn)
+{
+ struct bb_node *new_bb, *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ if (bb->head == insn)
+ return bb;
+ else if (bb->head > insn)
+ break;
+ }
+
+ bb = bb_prev(bb);
+ new_bb = calloc(1, sizeof(*new_bb));
+ if (!new_bb) {
+ p_err("OOM when allocating BB node");
+ return NULL;
+ }
+ new_bb->head = insn;
+ INIT_LIST_HEAD(&new_bb->e_prevs);
+ INIT_LIST_HEAD(&new_bb->e_succs);
+ list_add(&new_bb->l, &bb->l);
+
+ return new_bb;
+}
+
+static struct bb_node *func_insert_dummy_bb(struct list_head *after)
+{
+ struct bb_node *bb;
+
+ bb = calloc(1, sizeof(*bb));
+ if (!bb) {
+ p_err("OOM when allocating BB node");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&bb->e_prevs);
+ INIT_LIST_HEAD(&bb->e_succs);
+ list_add(&bb->l, after);
+
+ return bb;
+}
+
+static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
+ struct bpf_insn *end)
+{
+ struct func_node *func, *last_func;
+
+ func = cfg_append_func(cfg, cur);
+ if (!func)
+ return true;
+
+ for (; cur < end; cur++) {
+ if (cur->code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (cur->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ func = cfg_append_func(cfg, cur + cur->off + 1);
+ if (!func)
+ return true;
+ }
+
+ last_func = cfg_last_func(cfg);
+ last_func->end = end - 1;
+ func = cfg_first_func(cfg);
+ list_for_each_entry_from(func, &last_func->l, l) {
+ func->end = func_next(func)->start - 1;
+ }
+
+ return false;
+}
+
+static bool func_partition_bb_head(struct func_node *func)
+{
+ struct bpf_insn *cur, *end;
+ struct bb_node *bb;
+
+ cur = func->start;
+ end = func->end;
+ INIT_LIST_HEAD(&func->bbs);
+ bb = func_append_bb(func, cur);
+ if (!bb)
+ return true;
+
+ for (; cur <= end; cur++) {
+ if (BPF_CLASS(cur->code) == BPF_JMP) {
+ u8 opcode = BPF_OP(cur->code);
+
+ if (opcode == BPF_EXIT || opcode == BPF_CALL)
+ continue;
+
+ bb = func_append_bb(func, cur + cur->off + 1);
+ if (!bb)
+ return true;
+
+ if (opcode != BPF_JA) {
+ bb = func_append_bb(func, cur + 1);
+ if (!bb)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void func_partition_bb_tail(struct func_node *func)
+{
+ unsigned int bb_idx = NUM_FIXED_BLOCKS;
+ struct bb_node *bb, *last;
+
+ last = func_last_bb(func);
+ last->tail = func->end;
+ bb = func_first_bb(func);
+ list_for_each_entry_from(bb, &last->l, l) {
+ bb->tail = bb_next(bb)->head - 1;
+ bb->idx = bb_idx++;
+ }
+
+ last->idx = bb_idx++;
+ func->bb_num = bb_idx;
+}
+
+static bool func_add_special_bb(struct func_node *func)
+{
+ struct bb_node *bb;
+
+ bb = func_insert_dummy_bb(&func->bbs);
+ if (!bb)
+ return true;
+ bb->idx = ENTRY_BLOCK_INDEX;
+
+ bb = func_insert_dummy_bb(&func_last_bb(func)->l);
+ if (!bb)
+ return true;
+ bb->idx = EXIT_BLOCK_INDEX;
+
+ return false;
+}
+
+static bool func_partition_bb(struct func_node *func)
+{
+ if (func_partition_bb_head(func))
+ return true;
+
+ func_partition_bb_tail(func);
+
+ return false;
+}
+
+static struct bb_node *func_search_bb_with_head(struct func_node *func,
+ struct bpf_insn *insn)
+{
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ if (bb->head == insn)
+ return bb;
+ }
+
+ return NULL;
+}
+
+static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
+ int flags)
+{
+ struct edge_node *e;
+
+ e = calloc(1, sizeof(*e));
+ if (!e) {
+ p_err("OOM when allocating edge node");
+ return NULL;
+ }
+
+ if (src)
+ e->src = src;
+ if (dst)
+ e->dst = dst;
+
+ e->flags |= flags;
+
+ return e;
+}
+
+static bool func_add_bb_edges(struct func_node *func)
+{
+ struct bpf_insn *insn;
+ struct edge_node *e;
+ struct bb_node *bb;
+
+ bb = entry_bb(func);
+ e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
+ if (!e)
+ return true;
+ list_add_tail(&e->l, &bb->e_succs);
+
+ bb = exit_bb(func);
+ e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
+ if (!e)
+ return true;
+ list_add_tail(&e->l, &bb->e_prevs);
+
+ bb = entry_bb(func);
+ bb = bb_next(bb);
+ list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
+ e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
+ if (!e)
+ return true;
+ e->src = bb;
+
+ insn = bb->tail;
+ if (BPF_CLASS(insn->code) != BPF_JMP ||
+ BPF_OP(insn->code) == BPF_EXIT) {
+ e->dst = bb_next(bb);
+ e->flags |= EDGE_FLAG_FALLTHROUGH;
+ list_add_tail(&e->l, &bb->e_succs);
+ continue;
+ } else if (BPF_OP(insn->code) == BPF_JA) {
+ e->dst = func_search_bb_with_head(func,
+ insn + insn->off + 1);
+ e->flags |= EDGE_FLAG_JUMP;
+ list_add_tail(&e->l, &bb->e_succs);
+ continue;
+ }
+
+ e->dst = bb_next(bb);
+ e->flags |= EDGE_FLAG_FALLTHROUGH;
+ list_add_tail(&e->l, &bb->e_succs);
+
+ e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
+ if (!e)
+ return true;
+ e->src = bb;
+ e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
+ list_add_tail(&e->l, &bb->e_succs);
+ }
+
+ return false;
+}
+
+static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
+{
+ int cnt = len / sizeof(*insn);
+ struct func_node *func;
+
+ INIT_LIST_HEAD(&cfg->funcs);
+
+ if (cfg_partition_funcs(cfg, insn, insn + cnt))
+ return true;
+
+ list_for_each_entry(func, &cfg->funcs, l) {
+ if (func_partition_bb(func) || func_add_special_bb(func))
+ return true;
+
+ if (func_add_bb_edges(func))
+ return true;
+ }
+
+ return false;
+}
+
+static void cfg_destroy(struct cfg *cfg)
+{
+ struct func_node *func, *func2;
+
+ list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
+ struct bb_node *bb, *bb2;
+
+ list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
+ struct edge_node *e, *e2;
+
+ list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
+ list_del(&e->l);
+ free(e);
+ }
+
+ list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
+ list_del(&e->l);
+ free(e);
+ }
+
+ list_del(&bb->l);
+ free(bb);
+ }
+
+ list_del(&func->l);
+ free(func);
+ }
+}
+
+static void draw_bb_node(struct func_node *func, struct bb_node *bb)
+{
+ const char *shape;
+
+ if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
+ shape = "Mdiamond";
+ else
+ shape = "record";
+
+ printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
+ func->idx, bb->idx, shape);
+
+ if (bb->idx == ENTRY_BLOCK_INDEX) {
+ printf("ENTRY");
+ } else if (bb->idx == EXIT_BLOCK_INDEX) {
+ printf("EXIT");
+ } else {
+ unsigned int start_idx;
+ struct dump_data dd = {};
+
+ printf("{");
+ kernel_syms_load(&dd);
+ start_idx = bb->head - func->start;
+ dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
+ kernel_syms_destroy(&dd);
+ printf("}");
+ }
+
+ printf("\"];\n\n");
+}
+
+static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
+{
+ const char *style = "\"solid,bold\"";
+ const char *color = "black";
+ int func_idx = func->idx;
+ struct edge_node *e;
+ int weight = 10;
+
+ if (list_empty(&bb->e_succs))
+ return;
+
+ list_for_each_entry(e, &bb->e_succs, l) {
+ printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
+ func_idx, e->src->idx, func_idx, e->dst->idx,
+ style, color, weight);
+ printf("];\n");
+ }
+}
+
+static void func_output_bb_def(struct func_node *func)
+{
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ draw_bb_node(func, bb);
+ }
+}
+
+static void func_output_edges(struct func_node *func)
+{
+ int func_idx = func->idx;
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ draw_bb_succ_edges(func, bb);
+ }
+
+ /* Add an invisible edge from ENTRY to EXIT; this improves
+ * the graph layout.
+ */
+ printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
+ func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
+}
+
+static void cfg_dump(struct cfg *cfg)
+{
+ struct func_node *func;
+
+ printf("digraph \"DOT graph for eBPF program\" {\n");
+ list_for_each_entry(func, &cfg->funcs, l) {
+ printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
+ func->idx, func->idx);
+ func_output_bb_def(func);
+ func_output_edges(func);
+ printf("}\n");
+ }
+ printf("}\n");
+}
+
+void dump_xlated_cfg(void *buf, unsigned int len)
+{
+ struct bpf_insn *insn = buf;
+ struct cfg cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ if (cfg_build(&cfg, insn, len))
+ return;
+
+ cfg_dump(&cfg);
+
+ cfg_destroy(&cfg);
+}
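
For reference, the block-splitting rule that func_partition_bb_head() applies can be restated as a small stand-alone sketch (illustration only, not part of the patch): a jump instruction opens a new basic block at its target and, unless it is an unconditional BPF_JA, at its fall-through instruction as well, while BPF_EXIT and BPF_CALL never split a block.

#include <linux/bpf.h>
#include <stdbool.h>

/* Sketch: does this instruction force new basic-block heads, i.e. should
 * blocks start at its jump target and (for conditional jumps) at the
 * instruction that follows it?
 */
static bool insn_splits_bb(const struct bpf_insn *insn)
{
	__u8 op;

	if (BPF_CLASS(insn->code) != BPF_JMP)
		return false;

	op = BPF_OP(insn->code);
	return op != BPF_EXIT && op != BPF_CALL;
}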
diff --git a/tools/bpf/bpftool/cfg.h b/tools/bpf/bpftool/cfg.h
new file mode 100644
index 000000000000..2cc9bd990b13
--- /dev/null
+++ b/tools/bpf/bpftool/cfg.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __BPF_TOOL_CFG_H
+#define __BPF_TOOL_CFG_H
+
+void dump_xlated_cfg(void *buf, unsigned int len);
+
+#endif /* __BPF_TOOL_CFG_H */
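
A minimal usage sketch for this interface, assuming prog_fd is an already-open BPF program file descriptor and that libbpf's bpf_obj_get_info_by_fd() is available; the helper name dump_prog_cfg below is hypothetical, and the flow mirrors what do_dump() in prog.c does for the "visual" keyword:

#include <stdlib.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf.h>

#include "cfg.h"

static int dump_prog_cfg(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	unsigned int len;
	unsigned char *buf;

	/* First call: only query how large the xlated image is. */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return -1;

	len = info.xlated_prog_len;
	buf = calloc(1, len);
	if (!buf)
		return -1;

	/* Second call: have the kernel copy the instructions into buf. */
	memset(&info, 0, sizeof(info));
	info_len = sizeof(info);
	info.xlated_prog_insns = (__u64)(unsigned long)buf;
	info.xlated_prog_len = len;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
		free(buf);
		return -1;
	}

	dump_xlated_cfg(buf, info.xlated_prog_len);
	free(buf);
	return 0;
}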
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 185acfa229b5..1ec852d21d44 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -46,6 +46,9 @@
#include "main.h"
+#define BATCH_LINE_LEN_MAX 65536
+#define BATCH_ARG_NB_MAX 4096
+
const char *bin_name;
static int last_argc;
static char **last_argv;
@@ -157,6 +160,54 @@ void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep)
}
}
+/* Split command line into argument vector. */
+static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
+{
+ static const char ws[] = " \t\r\n";
+ char *cp = line;
+ int n_argc = 0;
+
+ while (*cp) {
+ /* Skip leading whitespace. */
+ cp += strspn(cp, ws);
+
+ if (*cp == '\0')
+ break;
+
+ if (n_argc >= (maxargs - 1)) {
+ p_err("too many arguments to command %d", cmd_nb);
+ return -1;
+ }
+
+ /* Word begins with quote. */
+ if (*cp == '\'' || *cp == '"') {
+ char quote = *cp++;
+
+ n_argv[n_argc++] = cp;
+ /* Find ending quote. */
+ cp = strchr(cp, quote);
+ if (!cp) {
+ p_err("unterminated quoted string in command %d",
+ cmd_nb);
+ return -1;
+ }
+ } else {
+ n_argv[n_argc++] = cp;
+
+ /* Find end of word. */
+ cp += strcspn(cp, ws);
+ if (*cp == '\0')
+ break;
+ }
+
+ /* Separate words. */
+ *cp++ = 0;
+ }
+ n_argv[n_argc] = NULL;
+
+ return n_argc;
+}
+
static int do_batch(int argc, char **argv);
static const struct cmd cmds[] = {
@@ -171,11 +222,12 @@ static const struct cmd cmds[] = {
static int do_batch(int argc, char **argv)
{
+ char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
+ char *n_argv[BATCH_ARG_NB_MAX];
unsigned int lines = 0;
- char *n_argv[4096];
- char buf[65536];
int n_argc;
FILE *fp;
+ char *cp;
int err;
int i;
@@ -191,7 +243,10 @@ static int do_batch(int argc, char **argv)
}
NEXT_ARG();
- fp = fopen(*argv, "r");
+ if (!strcmp(*argv, "-"))
+ fp = stdin;
+ else
+ fp = fopen(*argv, "r");
if (!fp) {
p_err("Can't open file (%s): %s", *argv, strerror(errno));
return -1;
@@ -200,27 +255,45 @@ static int do_batch(int argc, char **argv)
if (json_output)
jsonw_start_array(json_wtr);
while (fgets(buf, sizeof(buf), fp)) {
+ cp = strchr(buf, '#');
+ if (cp)
+ *cp = '\0';
+
if (strlen(buf) == sizeof(buf) - 1) {
errno = E2BIG;
break;
}
- n_argc = 0;
- n_argv[n_argc] = strtok(buf, " \t\n");
-
- while (n_argv[n_argc]) {
- n_argc++;
- if (n_argc == ARRAY_SIZE(n_argv)) {
- p_err("line %d has too many arguments, skip",
+ /* Append continuation lines if any (coming after a line ending
+ * with '\' in the batch file).
+ */
+ while ((cp = strstr(buf, "\\\n")) != NULL) {
+ if (!fgets(contline, sizeof(contline), fp) ||
+ strlen(contline) == 0) {
+ p_err("missing continuation line on command %d",
lines);
- n_argc = 0;
- break;
+ err = -1;
+ goto err_close;
+ }
+
+ cp = strchr(contline, '#');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(buf) + strlen(contline) + 1 > sizeof(buf)) {
+ p_err("command %d is too long", lines);
+ err = -1;
+ goto err_close;
}
- n_argv[n_argc] = strtok(NULL, " \t\n");
+ buf[strlen(buf) - 2] = '\0';
+ strcat(buf, contline);
}
+ n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
if (!n_argc)
continue;
+ if (n_argc < 0)
+ goto err_close;
if (json_output) {
jsonw_start_object(json_wtr);
@@ -247,11 +320,12 @@ static int do_batch(int argc, char **argv)
p_err("reading batch file failed: %s", strerror(errno));
err = -1;
} else {
- p_info("processed %d lines", lines);
+ p_info("processed %d commands", lines);
err = 0;
}
err_close:
- fclose(fp);
+ if (fp != stdin)
+ fclose(fp);
if (json_output)
jsonw_end_array(json_wtr);
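
To illustrate the quoting rules the new make_args() parser implements, here is a hypothetical driver (make_args() is static to main.c, so such a snippet would have to live in that file; it is not part of the patch). Quoted words keep their embedded whitespace and lose the surrounding quotes, and the caller strips '#' comments before splitting.

static void make_args_demo(void)
{
	char line[] = "prog dump xlated pinned '/sys/fs/bpf/my prog' opcodes";
	char *n_argv[BATCH_ARG_NB_MAX];
	int n_argc, i;

	n_argc = make_args(line, n_argv, BATCH_ARG_NB_MAX, 0);
	for (i = 0; i < n_argc; i++)
		printf("arg %d: <%s>\n", i, n_argv[i]);
	/* Prints six arguments; the fifth is "/sys/fs/bpf/my prog". */
}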
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index e549e329be82..f7a810897eac 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -47,8 +47,9 @@
#include <bpf.h>
#include <libbpf.h>
+#include "cfg.h"
#include "main.h"
-#include "disasm.h"
+#include "xlated_dumper.h"
static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
@@ -407,259 +408,6 @@ static int do_show(int argc, char **argv)
return err;
}
-#define SYM_MAX_NAME 256
-
-struct kernel_sym {
- unsigned long address;
- char name[SYM_MAX_NAME];
-};
-
-struct dump_data {
- unsigned long address_call_base;
- struct kernel_sym *sym_mapping;
- __u32 sym_count;
- char scratch_buff[SYM_MAX_NAME];
-};
-
-static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
-{
- return ((struct kernel_sym *)sym_a)->address -
- ((struct kernel_sym *)sym_b)->address;
-}
-
-static void kernel_syms_load(struct dump_data *dd)
-{
- struct kernel_sym *sym;
- char buff[256];
- void *tmp, *address;
- FILE *fp;
-
- fp = fopen("/proc/kallsyms", "r");
- if (!fp)
- return;
-
- while (!feof(fp)) {
- if (!fgets(buff, sizeof(buff), fp))
- break;
- tmp = realloc(dd->sym_mapping,
- (dd->sym_count + 1) *
- sizeof(*dd->sym_mapping));
- if (!tmp) {
-out:
- free(dd->sym_mapping);
- dd->sym_mapping = NULL;
- fclose(fp);
- return;
- }
- dd->sym_mapping = tmp;
- sym = &dd->sym_mapping[dd->sym_count];
- if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
- continue;
- sym->address = (unsigned long)address;
- if (!strcmp(sym->name, "__bpf_call_base")) {
- dd->address_call_base = sym->address;
- /* sysctl kernel.kptr_restrict was set */
- if (!sym->address)
- goto out;
- }
- if (sym->address)
- dd->sym_count++;
- }
-
- fclose(fp);
-
- qsort(dd->sym_mapping, dd->sym_count,
- sizeof(*dd->sym_mapping), kernel_syms_cmp);
-}
-
-static void kernel_syms_destroy(struct dump_data *dd)
-{
- free(dd->sym_mapping);
-}
-
-static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
- unsigned long key)
-{
- struct kernel_sym sym = {
- .address = key,
- };
-
- return dd->sym_mapping ?
- bsearch(&sym, dd->sym_mapping, dd->sym_count,
- sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
-}
-
-static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vprintf(fmt, args);
- va_end(args);
-}
-
-static const char *print_call_pcrel(struct dump_data *dd,
- struct kernel_sym *sym,
- unsigned long address,
- const struct bpf_insn *insn)
-{
- if (sym)
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "%+d#%s", insn->off, sym->name);
- else
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "%+d#0x%lx", insn->off, address);
- return dd->scratch_buff;
-}
-
-static const char *print_call_helper(struct dump_data *dd,
- struct kernel_sym *sym,
- unsigned long address)
-{
- if (sym)
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "%s", sym->name);
- else
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "0x%lx", address);
- return dd->scratch_buff;
-}
-
-static const char *print_call(void *private_data,
- const struct bpf_insn *insn)
-{
- struct dump_data *dd = private_data;
- unsigned long address = dd->address_call_base + insn->imm;
- struct kernel_sym *sym;
-
- sym = kernel_syms_search(dd, address);
- if (insn->src_reg == BPF_PSEUDO_CALL)
- return print_call_pcrel(dd, sym, address, insn);
- else
- return print_call_helper(dd, sym, address);
-}
-
-static const char *print_imm(void *private_data,
- const struct bpf_insn *insn,
- __u64 full_imm)
-{
- struct dump_data *dd = private_data;
-
- if (insn->src_reg == BPF_PSEUDO_MAP_FD)
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "map[id:%u]", insn->imm);
- else
- snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
- "0x%llx", (unsigned long long)full_imm);
- return dd->scratch_buff;
-}
-
-static void dump_xlated_plain(struct dump_data *dd, void *buf,
- unsigned int len, bool opcodes)
-{
- const struct bpf_insn_cbs cbs = {
- .cb_print = print_insn,
- .cb_call = print_call,
- .cb_imm = print_imm,
- .private_data = dd,
- };
- struct bpf_insn *insn = buf;
- bool double_insn = false;
- unsigned int i;
-
- for (i = 0; i < len / sizeof(*insn); i++) {
- if (double_insn) {
- double_insn = false;
- continue;
- }
-
- double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
-
- printf("% 4d: ", i);
- print_bpf_insn(&cbs, NULL, insn + i, true);
-
- if (opcodes) {
- printf(" ");
- fprint_hex(stdout, insn + i, 8, " ");
- if (double_insn && i < len - 1) {
- printf(" ");
- fprint_hex(stdout, insn + i + 1, 8, " ");
- }
- printf("\n");
- }
- }
-}
-
-static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
-{
- unsigned int l = strlen(fmt);
- char chomped_fmt[l];
- va_list args;
-
- va_start(args, fmt);
- if (l > 0) {
- strncpy(chomped_fmt, fmt, l - 1);
- chomped_fmt[l - 1] = '\0';
- }
- jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
- va_end(args);
-}
-
-static void dump_xlated_json(struct dump_data *dd, void *buf,
- unsigned int len, bool opcodes)
-{
- const struct bpf_insn_cbs cbs = {
- .cb_print = print_insn_json,
- .cb_call = print_call,
- .cb_imm = print_imm,
- .private_data = dd,
- };
- struct bpf_insn *insn = buf;
- bool double_insn = false;
- unsigned int i;
-
- jsonw_start_array(json_wtr);
- for (i = 0; i < len / sizeof(*insn); i++) {
- if (double_insn) {
- double_insn = false;
- continue;
- }
- double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
-
- jsonw_start_object(json_wtr);
- jsonw_name(json_wtr, "disasm");
- print_bpf_insn(&cbs, NULL, insn + i, true);
-
- if (opcodes) {
- jsonw_name(json_wtr, "opcodes");
- jsonw_start_object(json_wtr);
-
- jsonw_name(json_wtr, "code");
- jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
-
- jsonw_name(json_wtr, "src_reg");
- jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
-
- jsonw_name(json_wtr, "dst_reg");
- jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
-
- jsonw_name(json_wtr, "off");
- print_hex_data_json((uint8_t *)(&insn[i].off), 2);
-
- jsonw_name(json_wtr, "imm");
- if (double_insn && i < len - 1)
- print_hex_data_json((uint8_t *)(&insn[i].imm),
- 12);
- else
- print_hex_data_json((uint8_t *)(&insn[i].imm),
- 4);
- jsonw_end_object(json_wtr);
- }
- jsonw_end_object(json_wtr);
- }
- jsonw_end_array(json_wtr);
-}
-
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info info = {};
@@ -668,6 +416,7 @@ static int do_dump(int argc, char **argv)
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
+ bool visual = false;
unsigned char *buf;
__u32 *member_len;
__u64 *member_ptr;
@@ -706,6 +455,9 @@ static int do_dump(int argc, char **argv)
} else if (is_prefix(*argv, "opcodes")) {
opcodes = true;
NEXT_ARG();
+ } else if (is_prefix(*argv, "visual")) {
+ visual = true;
+ NEXT_ARG();
}
if (argc) {
@@ -777,27 +529,30 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_null(json_wtr);
- } else {
- if (member_len == &info.jited_prog_len) {
- const char *name = NULL;
-
- if (info.ifindex) {
- name = ifindex_to_bfd_name_ns(info.ifindex,
- info.netns_dev,
- info.netns_ino);
- if (!name)
- goto err_free;
- }
-
- disasm_print_insn(buf, *member_len, opcodes, name);
- } else {
- kernel_syms_load(&dd);
- if (json_output)
- dump_xlated_json(&dd, buf, *member_len, opcodes);
- else
- dump_xlated_plain(&dd, buf, *member_len, opcodes);
- kernel_syms_destroy(&dd);
+ } else if (member_len == &info.jited_prog_len) {
+ const char *name = NULL;
+
+ if (info.ifindex) {
+ name = ifindex_to_bfd_name_ns(info.ifindex,
+ info.netns_dev,
+ info.netns_ino);
+ if (!name)
+ goto err_free;
}
+
+ disasm_print_insn(buf, *member_len, opcodes, name);
+ } else if (visual) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ dump_xlated_cfg(buf, *member_len);
+ } else {
+ kernel_syms_load(&dd);
+ if (json_output)
+ dump_xlated_json(&dd, buf, *member_len, opcodes);
+ else
+ dump_xlated_plain(&dd, buf, *member_len, opcodes);
+ kernel_syms_destroy(&dd);
}
free(buf);
@@ -851,7 +606,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [PROG]\n"
- " %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
+ " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
" %s %s load OBJ FILE\n"
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
new file mode 100644
index 000000000000..20da835e9e38
--- /dev/null
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "disasm.h"
+#include "json_writer.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+ return ((struct kernel_sym *)sym_a)->address -
+ ((struct kernel_sym *)sym_b)->address;
+}
+
+void kernel_syms_load(struct dump_data *dd)
+{
+ struct kernel_sym *sym;
+ char buff[256];
+ void *tmp, *address;
+ FILE *fp;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp)
+ return;
+
+ while (!feof(fp)) {
+ if (!fgets(buff, sizeof(buff), fp))
+ break;
+ tmp = realloc(dd->sym_mapping,
+ (dd->sym_count + 1) *
+ sizeof(*dd->sym_mapping));
+ if (!tmp) {
+out:
+ free(dd->sym_mapping);
+ dd->sym_mapping = NULL;
+ fclose(fp);
+ return;
+ }
+ dd->sym_mapping = tmp;
+ sym = &dd->sym_mapping[dd->sym_count];
+ if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+ continue;
+ sym->address = (unsigned long)address;
+ if (!strcmp(sym->name, "__bpf_call_base")) {
+ dd->address_call_base = sym->address;
+ /* sysctl kernel.kptr_restrict was set */
+ if (!sym->address)
+ goto out;
+ }
+ if (sym->address)
+ dd->sym_count++;
+ }
+
+ fclose(fp);
+
+ qsort(dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+void kernel_syms_destroy(struct dump_data *dd)
+{
+ free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+ unsigned long key)
+{
+ struct kernel_sym sym = {
+ .address = key,
+ };
+
+ return dd->sym_mapping ?
+ bsearch(&sym, dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
+static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+static void
+print_insn_for_graph(struct bpf_verifier_env *env, const char *fmt, ...)
+{
+ char buf[64], *p;
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ p = buf;
+ while (*p != '\0') {
+ if (*p == '\n') {
+ memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
+ /* Align each instruction dump row left. */
+ *p++ = '\\';
+ *p++ = 'l';
+ /* Output multiline concatenation. */
+ *p++ = '\\';
+ } else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
+ memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
+ /* Escape special character. */
+ *p++ = '\\';
+ }
+
+ p++;
+ }
+
+ printf("%s", buf);
+}
+
+static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
+{
+ unsigned int l = strlen(fmt);
+ char chomped_fmt[l];
+ va_list args;
+
+ va_start(args, fmt);
+ if (l > 0) {
+ strncpy(chomped_fmt, fmt, l - 1);
+ chomped_fmt[l - 1] = '\0';
+ }
+ jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
+ va_end(args);
+}
+
+static const char *print_call_pcrel(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address,
+ const struct bpf_insn *insn)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#%s", insn->off, sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#0x%lx", insn->off, address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%s", sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%lx", address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+ const struct bpf_insn *insn)
+{
+ struct dump_data *dd = private_data;
+ unsigned long address = dd->address_call_base + insn->imm;
+ struct kernel_sym *sym;
+
+ sym = kernel_syms_search(dd, address);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return print_call_pcrel(dd, sym, address, insn);
+ else
+ return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm)
+{
+ struct dump_data *dd = private_data;
+
+ if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u]", insn->imm);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%llx", (unsigned long long)full_imm);
+ return dd->scratch_buff;
+}
+
+void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes)
+{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_json,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ struct bpf_insn *insn = buf;
+ bool double_insn = false;
+ unsigned int i;
+
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < len / sizeof(*insn); i++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "disasm");
+ print_bpf_insn(&cbs, NULL, insn + i, true);
+
+ if (opcodes) {
+ jsonw_name(json_wtr, "opcodes");
+ jsonw_start_object(json_wtr);
+
+ jsonw_name(json_wtr, "code");
+ jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
+
+ jsonw_name(json_wtr, "src_reg");
+ jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
+
+ jsonw_name(json_wtr, "dst_reg");
+ jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
+
+ jsonw_name(json_wtr, "off");
+ print_hex_data_json((uint8_t *)(&insn[i].off), 2);
+
+ jsonw_name(json_wtr, "imm");
+ if (double_insn && i < len - 1)
+ print_hex_data_json((uint8_t *)(&insn[i].imm),
+ 12);
+ else
+ print_hex_data_json((uint8_t *)(&insn[i].imm),
+ 4);
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes)
+{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ struct bpf_insn *insn = buf;
+ bool double_insn = false;
+ unsigned int i;
+
+ for (i = 0; i < len / sizeof(*insn); i++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+
+ double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ printf("% 4d: ", i);
+ print_bpf_insn(&cbs, NULL, insn + i, true);
+
+ if (opcodes) {
+ printf(" ");
+ fprint_hex(stdout, insn + i, 8, " ");
+ if (double_insn && i < len - 1) {
+ printf(" ");
+ fprint_hex(stdout, insn + i + 1, 8, " ");
+ }
+ printf("\n");
+ }
+ }
+}
+
+void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
+ unsigned int start_idx)
+{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_for_graph,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ struct bpf_insn *insn_start = buf_start;
+ struct bpf_insn *insn_end = buf_end;
+ struct bpf_insn *cur = insn_start;
+
+ for (; cur <= insn_end; cur++) {
+ printf("% 4d: ", (int)(cur - insn_start + start_idx));
+ print_bpf_insn(&cbs, NULL, cur, true);
+ if (cur != insn_end)
+ printf(" | ");
+ }
+}
diff --git a/tools/bpf/bpftool/xlated_dumper.h b/tools/bpf/bpftool/xlated_dumper.h
new file mode 100644
index 000000000000..51c935d38ae2
--- /dev/null
+++ b/tools/bpf/bpftool/xlated_dumper.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __BPF_TOOL_XLATED_DUMPER_H
+#define __BPF_TOOL_XLATED_DUMPER_H
+
+#define SYM_MAX_NAME 256
+
+struct kernel_sym {
+ unsigned long address;
+ char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+ unsigned long address_call_base;
+ struct kernel_sym *sym_mapping;
+ __u32 sym_count;
+ char scratch_buff[SYM_MAX_NAME];
+};
+
+void kernel_syms_load(struct dump_data *dd);
+void kernel_syms_destroy(struct dump_data *dd);
+void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes);
+void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes);
+void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
+ unsigned int start_index);
+
+#endif
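
The intended call sequence around these dumpers, as used by do_dump() in prog.c, is simply load, dump, destroy. A short sketch follows; buf and len are assumed to hold the xlated image and its size in bytes, and json_output/opcodes are as in prog.c:

struct dump_data dd = {};

kernel_syms_load(&dd);		/* parse /proc/kallsyms so call targets get names */
if (json_output)
	dump_xlated_json(&dd, buf, len, opcodes);
else
	dump_xlated_plain(&dd, buf, len, opcodes);
kernel_syms_destroy(&dd);	/* free the symbol mapping */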
diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h
new file mode 100644
index 000000000000..9dac9b30f8ef
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_rlimit.h
@@ -0,0 +1,28 @@
+#include <sys/resource.h>
+#include <stdio.h>
+
+static __attribute__((constructor)) void bpf_rlimit_ctor(void)
+{
+ struct rlimit rlim_old, rlim_new = {
+ .rlim_cur = RLIM_INFINITY,
+ .rlim_max = RLIM_INFINITY,
+ };
+
+ getrlimit(RLIMIT_MEMLOCK, &rlim_old);
+ /* For the sake of running the test cases, we temporarily
+ * set the rlimit to infinity so that the kernel reports
+ * errors from the actual test cases rather than noise from
+ * hitting memlock limits. The limit is on a per-process
+ * basis and not a global one, hence a destructor is not
+ * really needed here.
+ */
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
+ perror("Unable to lift memlock rlimit");
+ /* Try a lower limit, but expect potential test case
+ * failures from this!
+ */
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+ }
+}
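
A minimal usage sketch (hypothetical test, not part of the patch): simply including bpf_rlimit.h is enough, since the constructor above runs before main() and lifts RLIMIT_MEMLOCK, so the map creation below is not perturbed by a low default memlock limit.

#include <bpf/bpf.h>
#include "bpf_rlimit.h"

int main(void)
{
	int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				sizeof(long long), 256, 0);

	return fd < 0;
}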
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index ff8bd7e3e50c..6b1b302310fe 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -9,8 +9,6 @@
#include <stddef.h>
#include <stdbool.h>
-#include <sys/resource.h>
-
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
@@ -19,6 +17,7 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
+#include "bpf_rlimit.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -702,9 +701,6 @@ static int do_test(unsigned int from, unsigned int to)
int main(int argc, char **argv)
{
unsigned int from = 0, to = ARRAY_SIZE(tests);
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 3489cc283433..9c8b50bac7e0 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -11,13 +11,13 @@
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
@@ -25,15 +25,11 @@
int main(int argc, char **argv)
{
- struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
struct bpf_object *obj;
int error = EXIT_FAILURE;
int prog_fd, cgroup_fd;
__u32 prog_cnt;
- if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
- perror("Unable to lift memlock rlimit");
-
if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 2be87e9ee28d..147e34cfceb7 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -22,10 +22,11 @@
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
+
#include "bpf_util.h"
+#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
@@ -736,17 +737,11 @@ static void test_lpm_multi_thread(void)
int main(void)
{
- struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
- int i, ret;
+ int i;
/* we want predictable, pseudo random tests */
srand(0xf00ba1);
- /* allow unlimited locked memory */
- ret = setrlimit(RLIMIT_MEMLOCK, &limit);
- if (ret < 0)
- perror("Unable to lift memlock rlimit");
-
test_lpm_basic();
test_lpm_order();
@@ -755,11 +750,8 @@ int main(void)
test_lpm_map(i);
test_lpm_ipaddr();
-
test_lpm_delete();
-
test_lpm_get_next_key();
-
test_lpm_multi_thread();
printf("test_lpm: OK\n");
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 8c10c9180c1a..781c7de343be 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -16,10 +16,11 @@
#include <time.h>
#include <sys/wait.h>
-#include <sys/resource.h>
#include <bpf/bpf.h>
+
#include "bpf_util.h"
+#include "bpf_rlimit.h"
#define LOCAL_FREE_TARGET (128)
#define PERCPU_FREE_TARGET (4)
@@ -613,7 +614,6 @@ static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
int main(int argc, char **argv)
{
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH};
int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
@@ -621,8 +621,6 @@ int main(int argc, char **argv)
setbuf(stdout, NULL);
- assert(!setrlimit(RLIMIT_MEMLOCK, &r));
-
nr_cpus = bpf_num_possible_cpus();
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 9e03a4c356a4..1238733c5b33 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -17,13 +17,14 @@
#include <stdlib.h>
#include <sys/wait.h>
-#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+
#include "bpf_util.h"
+#include "bpf_rlimit.h"
static int map_flags;
@@ -1126,10 +1127,6 @@ static void run_all_tests(void)
int main(void)
{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-
map_flags = 0;
run_all_tests();
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index b549308abd19..27ad5404389e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -26,7 +26,6 @@ typedef __u16 __sum16;
#include <sys/ioctl.h>
#include <sys/wait.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <fcntl.h>
@@ -34,9 +33,11 @@ typedef __u16 __sum16;
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+
#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
+#include "bpf_rlimit.h"
static int error_cnt, pass_cnt;
@@ -965,10 +966,6 @@ out:
int main(void)
{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-
test_pkt_access();
test_xdp();
test_l4lb_all();
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 8b201895c569..6272c784ca2a 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -12,7 +12,6 @@
#include <assert.h>
#include <sys/socket.h>
-#include <sys/resource.h>
#include <linux/filter.h>
#include <linux/bpf.h>
@@ -21,6 +20,7 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
+#include "bpf_rlimit.h"
static struct bpf_insn prog[BPF_MAXINSNS];
@@ -184,11 +184,9 @@ static void do_test(uint32_t *tests, int start_insns, int fd_map,
int main(void)
{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
uint32_t tests = 0;
int i, fd_map;
- setrlimit(RLIMIT_MEMLOCK, &rinf);
fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
sizeof(int), 1, BPF_F_NO_PREALLOC);
assert(fd_map > 0);
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index 5d73db416460..84ab5163c828 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -12,13 +12,13 @@
#include <linux/bpf.h>
#include <sys/ioctl.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
+#include "bpf_rlimit.h"
#include <linux/perf_event.h>
#include "test_tcpbpf.h"
@@ -44,7 +44,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
int main(int argc, char **argv)
{
- struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
const char *file = "test_tcpbpf_kern.o";
struct tcpbpf_globals g = {0};
int cg_fd, prog_fd, map_fd;
@@ -57,9 +56,6 @@ int main(int argc, char **argv)
int pid;
int rv;
- if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
- perror("Unable to lift memlock rlimit");
-
if (argc > 1 && strcmp(argv[1], "-d") == 0)
debug_flag = true;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2164d218322e..9eb05f3135ac 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -24,7 +24,6 @@
#include <limits.h>
#include <sys/capability.h>
-#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
@@ -41,7 +40,7 @@
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
-
+#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
@@ -2590,17 +2589,74 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "runtime/jit: tail_call within bounds, prog once",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 1 },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "runtime/jit: tail_call within bounds, prog loop",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 1 },
+ .result = ACCEPT,
+ .retval = 41,
+ },
+ {
+ "runtime/jit: tail_call within bounds, no prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "runtime/jit: tail_call out of bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 256),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+ },
+ {
"runtime/jit: pass negative index to tail_call",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, -1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
+ .retval = 2,
},
{
"runtime/jit: pass > 32bit index to tail_call",
@@ -2609,11 +2665,12 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 2 },
.result = ACCEPT,
+ .retval = 42,
},
{
"stack pointer arithmetic",
@@ -11279,16 +11336,61 @@ static int create_map(uint32_t size_value, uint32_t max_elem)
return fd;
}
+static int create_prog_dummy1(void)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ };
+
+ return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+ ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+}
+
+static int create_prog_dummy2(int mfd, int idx)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_IMM(BPF_REG_3, idx),
+ BPF_LD_MAP_FD(BPF_REG_2, mfd),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 41),
+ BPF_EXIT_INSN(),
+ };
+
+ return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+ ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+}
+
static int create_prog_array(void)
{
- int fd;
+ int p1key = 0, p2key = 1;
+ int mfd, p1fd, p2fd;
- fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
- sizeof(int), 4, 0);
- if (fd < 0)
+ mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
+ sizeof(int), 4, 0);
+ if (mfd < 0) {
printf("Failed to create prog array '%s'!\n", strerror(errno));
+ return -1;
+ }
- return fd;
+ p1fd = create_prog_dummy1();
+ p2fd = create_prog_dummy2(mfd, p2key);
+ if (p1fd < 0 || p2fd < 0)
+ goto out;
+ if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
+ goto out;
+ if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
+ goto out;
+ close(p2fd);
+ close(p1fd);
+
+ return mfd;
+out:
+ close(p2fd);
+ close(p1fd);
+ close(mfd);
+ return -1;
}
static int create_map_in_map(void)
@@ -11543,8 +11645,6 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
int main(int argc, char **argv)
{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
- struct rlimit rlim = { 1 << 20, 1 << 20 };
unsigned int from = 0, to = ARRAY_SIZE(tests);
bool unpriv = !is_admin();
@@ -11572,6 +11672,5 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
- setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
return do_test(unpriv, from, to);
}
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index e9626cf5607a..8d6918c3b4a2 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -4,7 +4,6 @@
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -12,6 +11,8 @@
#include <bpf/bpf.h>
+#include "bpf_rlimit.h"
+
#define LOG_SIZE (1 << 20)
#define err(str...) printf("ERROR: " str)
@@ -133,16 +134,11 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
int main(int argc, char **argv)
{
- struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
char full_log[LOG_SIZE];
char log[LOG_SIZE];
size_t want_len;
int i;
- /* allow unlimited locked memory to have more consistent error code */
- if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
- perror("Unable to lift memlock rlimit");
-
memset(log, 1, LOG_SIZE);
/* Test incorrect attr */