summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorJakub Kicinski <kuba@kernel.org>2026-04-02 20:57:09 +0300
committerJakub Kicinski <kuba@kernel.org>2026-04-09 23:20:59 +0300
commitb6e39e48469e37057fce27a1b87cf6d3e456aa42 (patch)
tree1aa6c06171088292548cf8ba04d4e9e0cf1b5b43 /tools
parent9700282a7ec721e285771d995ccfe33845e776dc (diff)
parenta55f7f5f29b32c2c53cc291899cf9b0c25a07f7c (diff)
downloadlinux-b6e39e48469e37057fce27a1b87cf6d3e456aa42.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-7.0-rc8). Conflicts: net/ipv6/seg6_iptunnel.c c3812651b522f ("seg6: separate dst_cache for input and output paths in seg6 lwtunnel") 78723a62b969a ("seg6: add per-route tunnel source address") https://lore.kernel.org/adZhwtOYfo-0ImSa@sirena.org.uk net/ipv4/icmp.c fde29fd934932 ("ipv4: icmp: fix null-ptr-deref in icmp_build_probe()") d98adfbdd5c01 ("ipv4: drop ipv6_stub usage and use direct function calls") https://lore.kernel.org/adO3dccqnr6j-BL9@sirena.org.uk Adjacent changes: drivers/net/ethernet/stmicro/stmmac/chain_mode.c 51f4e090b9f8 ("net: stmmac: fix integer underflow in chain mode") 6b4286e05508 ("net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()") Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xsk.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xsk.h23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xsk.c19
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c341
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c4
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c23
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh1
-rw-r--r--tools/testing/selftests/net/netfilter/nf_queue.c50
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh83
-rwxr-xr-xtools/testing/selftests/net/srv6_iptunnel_cache.sh197
-rw-r--r--tools/testing/selftests/riscv/vector/validate_v_ptrace.c19
-rw-r--r--tools/testing/vsock/util.c8
13 files changed, 766 insertions, 58 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index 7e38ec6e656b..7950c504ed28 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -179,25 +179,6 @@ int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem
return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}
-#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
-static unsigned int get_max_skb_frags(void)
-{
- unsigned int max_skb_frags = 0;
- FILE *file;
-
- file = fopen(MAX_SKB_FRAGS_PATH, "r");
- if (!file) {
- ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
- return 0;
- }
-
- if (fscanf(file, "%u", &max_skb_frags) != 1)
- ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);
-
- fclose(file);
- return max_skb_frags;
-}
-
static int set_ring_size(struct ifobject *ifobj)
{
int ret;
@@ -1978,15 +1959,17 @@ int testapp_headroom(struct test_spec *test)
int testapp_stats_rx_dropped(struct test_spec *test)
{
+ u32 umem_tr = test->ifobj_tx->umem_tailroom;
+
if (test->mode == TEST_MODE_ZC) {
ksft_print_msg("Can not run RX_DROPPED test for ZC mode\n");
return TEST_SKIP;
}
- if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
+ if (pkt_stream_replace_half(test, (MIN_PKT_SIZE * 3) + umem_tr, 0))
return TEST_FAILURE;
test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
- XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
+ XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr;
if (pkt_stream_receive_half(test))
return TEST_FAILURE;
test->ifobj_rx->validation_func = validate_rx_dropped;
@@ -2242,11 +2225,7 @@ int testapp_too_many_frags(struct test_spec *test)
if (test->mode == TEST_MODE_ZC) {
max_frags = test->ifobj_tx->xdp_zc_max_segs;
} else {
- max_frags = get_max_skb_frags();
- if (!max_frags) {
- ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
- max_frags = 17;
- }
+ max_frags = test->ifobj_tx->max_skb_frags;
max_frags += 1;
}
@@ -2551,16 +2530,34 @@ int testapp_adjust_tail_shrink_mb(struct test_spec *test)
int testapp_adjust_tail_grow(struct test_spec *test)
{
+ if (test->mode == TEST_MODE_SKB)
+ return TEST_SKIP;
+
/* Grow by 4 bytes for testing purpose */
return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
}
int testapp_adjust_tail_grow_mb(struct test_spec *test)
{
+ u32 grow_size;
+
+ if (test->mode == TEST_MODE_SKB)
+ return TEST_SKIP;
+
+ /* worst case scenario is when underlying setup will work on 3k
+ * buffers, let us account for it; given that we will use 6k as
+ * pkt_len, expect that it will be broken down to 2 descs each
+ * with 3k payload;
+ *
+ * 4k is truesize, 3k payload, 256 HR, 320 TR;
+ */
+ grow_size = XSK_UMEM__MAX_FRAME_SIZE -
+ XSK_UMEM__LARGE_FRAME_SIZE -
+ XDP_PACKET_HEADROOM -
+ test->ifobj_tx->umem_tailroom;
test->mtu = MAX_ETH_JUMBO_SIZE;
- /* Grow by (frag_size - last_frag_Size) - 1 to stay inside the last fragment */
- return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
- XSK_UMEM__LARGE_FRAME_SIZE * 2);
+
+ return testapp_adjust_tail(test, grow_size, XSK_UMEM__LARGE_FRAME_SIZE * 2);
}
int testapp_tx_queue_consumer(struct test_spec *test)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.h b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
index 8fc78a057de0..1ab8aee4ce56 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.h
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
@@ -31,6 +31,9 @@
#define SOCK_RECONF_CTR 10
#define USLEEP_MAX 10000
+#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
+#define SMP_CACHE_BYTES_PATH "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size"
+
extern bool opt_verbose;
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -45,6 +48,24 @@ static inline u64 ceil_u64(u64 a, u64 b)
return (a + b - 1) / b;
}
+static inline unsigned int read_procfs_val(const char *path)
+{
+ unsigned int read_val = 0;
+ FILE *file;
+
+ file = fopen(path, "r");
+ if (!file) {
+ ksft_print_msg("Error opening %s\n", path);
+ return 0;
+ }
+
+ if (fscanf(file, "%u", &read_val) != 1)
+ ksft_print_msg("Error reading %s\n", path);
+
+ fclose(file);
+ return read_val;
+}
+
/* Simple test */
enum test_mode {
TEST_MODE_SKB,
@@ -115,6 +136,8 @@ struct ifobject {
int mtu;
u32 bind_flags;
u32 xdp_zc_max_segs;
+ u32 umem_tailroom;
+ u32 max_skb_frags;
bool tx_on;
bool rx_on;
bool use_poll;
diff --git a/tools/testing/selftests/bpf/prog_tests/xsk.c b/tools/testing/selftests/bpf/prog_tests/xsk.c
index dd4c35c0e428..6e2f63ee2a6c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/xsk.c
@@ -62,6 +62,7 @@ int configure_ifobj(struct ifobject *tx, struct ifobject *rx)
static void test_xsk(const struct test_spec *test_to_run, enum test_mode mode)
{
+ u32 max_frags, umem_tailroom, cache_line_size;
struct ifobject *ifobj_tx, *ifobj_rx;
struct test_spec test;
int ret;
@@ -84,6 +85,24 @@ static void test_xsk(const struct test_spec *test_to_run, enum test_mode mode)
ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending;
}
+ cache_line_size = read_procfs_val(SMP_CACHE_BYTES_PATH);
+ if (!cache_line_size)
+ cache_line_size = 64;
+
+ max_frags = read_procfs_val(MAX_SKB_FRAGS_PATH);
+ if (!max_frags)
+ max_frags = 17;
+
+ ifobj_tx->max_skb_frags = max_frags;
+ ifobj_rx->max_skb_frags = max_frags;
+
+ /* 48 bytes is a part of skb_shared_info w/o frags array;
+ * 16 bytes is sizeof(skb_frag_t)
+ */
+ umem_tailroom = ALIGN(48 + (max_frags * 16), cache_line_size);
+ ifobj_tx->umem_tailroom = umem_tailroom;
+ ifobj_rx->umem_tailroom = umem_tailroom;
+
if (!ASSERT_OK(init_iface(ifobj_rx, worker_testapp_validate_rx), "init RX"))
goto delete_rx;
if (!ASSERT_OK(init_iface(ifobj_tx, worker_testapp_validate_tx), "init TX"))
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 1fe090cd6744..4794903aec8e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -5,6 +5,13 @@
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} precision_map SEC(".maps");
+
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
@@ -301,4 +308,338 @@ __naked int bpf_neg_5(void)
::: __clobber_all);
}
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_fetch_add_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(fetch_add_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_xchg_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(xchg_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_fetch_or_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(fetch_or_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_fetch_and_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(fetch_and_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_fetch_xor_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */
+ "r3 = r10;"
+ "r3 += r2;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(fetch_xor_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_cmpxchg_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r0 = 0;"
+ "r2 = 0;"
+ ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
+ "r3 = r10;"
+ "r3 += r0;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(cmpxchg_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+/* Regression test for dual precision: Both the fetched value (r2) and
+ * a reread of the same stack slot (r3) are tracked for precision. After
+ * the atomic operation, the stack slot is STACK_MISC. Thus, the ldx at
+ * insn 4 does NOT set INSN_F_STACK_ACCESS. Precision for the stack slot
+ * propagates solely through the atomic fetch's load side (insn 3).
+ */
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_fetch_add_dual_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = 0;"
+ ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
+ "r3 = *(u64 *)(r10 - 8);"
+ "r4 = r2;"
+ "r4 += r3;"
+ "r4 &= 7;"
+ "r5 = r10;"
+ "r5 += r4;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(fetch_add_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8")
+__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
+__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
+__naked int bpf_atomic_cmpxchg_dual_precision(void)
+{
+ asm volatile (
+ "r1 = 8;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r0 = 8;"
+ "r2 = 0;"
+ ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
+ "r3 = *(u64 *)(r10 - 8);"
+ "r4 = r0;"
+ "r4 += r3;"
+ "r4 &= 7;"
+ "r5 = r10;"
+ "r5 += r4;" /* mark_precise */
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_insn(cmpxchg_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
+__msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
+__not_msg("falling back to forcing all scalars precise")
+__naked int bpf_atomic_fetch_add_map_precision(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[precision_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r1 = 0;"
+ ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */
+ "r1 &= 7;"
+ "r2 = r10;"
+ "r2 += r1;" /* mark_precise */
+ "1: r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(precision_map),
+ __imm(bpf_map_lookup_elem),
+ __imm_insn(fetch_add_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)")
+__not_msg("falling back to forcing all scalars precise")
+__naked int bpf_atomic_cmpxchg_map_precision(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[precision_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r6 = r0;"
+ "r0 = 0;"
+ "r1 = 0;"
+ ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */
+ "r0 &= 7;"
+ "r2 = r10;"
+ "r2 += r0;" /* mark_precise */
+ "1: r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(precision_map),
+ __imm(bpf_map_lookup_elem),
+ __imm_insn(cmpxchg_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
+__msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)")
+__not_msg("falling back to forcing all scalars precise")
+__naked int bpf_atomic_fetch_add_32bit_precision(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[precision_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r1 = 0;"
+ ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */
+ "r1 &= 7;"
+ "r2 = r10;"
+ "r2 += r1;" /* mark_precise */
+ "1: r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(precision_map),
+ __imm(bpf_map_lookup_elem),
+ __imm_insn(fetch_add_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)")
+__not_msg("falling back to forcing all scalars precise")
+__naked int bpf_atomic_cmpxchg_32bit_precision(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[precision_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r6 = r0;"
+ "r0 = 0;"
+ "r1 = 0;"
+ ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */
+ "r0 &= 7;"
+ "r2 = r10;"
+ "r2 += r0;" /* mark_precise */
+ "1: r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(precision_map),
+ __imm(bpf_map_lookup_elem),
+ __imm_insn(cmpxchg_insn,
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 683306db8594..023d8befd4ca 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -26,8 +26,10 @@ SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)
SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)
{
+ static unsigned int drop_idx;
+
/* Drop every other packet */
- if (idx++ % 2)
+ if (drop_idx++ % 2)
return XDP_DROP;
return bpf_redirect_map(&xsk, 0, XDP_DROP);
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 05b3cebc5ca9..7dad8556a722 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -80,6 +80,7 @@
#include <linux/mman.h>
#include <linux/netdev.h>
#include <linux/ethtool.h>
+#include <linux/align.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
@@ -333,6 +334,7 @@ static void print_tests(void)
int main(int argc, char **argv)
{
const size_t total_tests = ARRAY_SIZE(tests) + ARRAY_SIZE(ci_skip_tests);
+ u32 cache_line_size, max_frags, umem_tailroom;
struct pkt_stream *rx_pkt_stream_default;
struct pkt_stream *tx_pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
@@ -354,6 +356,27 @@ int main(int argc, char **argv)
setlocale(LC_ALL, "");
+ cache_line_size = read_procfs_val(SMP_CACHE_BYTES_PATH);
+ if (!cache_line_size) {
+ ksft_print_msg("Can't get SMP_CACHE_BYTES from system, using default (64)\n");
+ cache_line_size = 64;
+ }
+
+ max_frags = read_procfs_val(MAX_SKB_FRAGS_PATH);
+ if (!max_frags) {
+ ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
+ max_frags = 17;
+ }
+ ifobj_tx->max_skb_frags = max_frags;
+ ifobj_rx->max_skb_frags = max_frags;
+
+ /* 48 bytes is a part of skb_shared_info w/o frags array;
+ * 16 bytes is sizeof(skb_frag_t)
+ */
+ umem_tailroom = ALIGN(48 + (max_frags * 16), cache_line_size);
+ ifobj_tx->umem_tailroom = umem_tailroom;
+ ifobj_rx->umem_tailroom = umem_tailroom;
+
parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
if (opt_print_tests) {
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 6bced3ed798b..cab74ebdaced 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -92,6 +92,7 @@ TEST_PROGS := \
srv6_end_x_next_csid_l3vpn_test.sh \
srv6_hencap_red_l3vpn_test.sh \
srv6_hl2encap_red_l2vpn_test.sh \
+ srv6_iptunnel_cache.sh \
stress_reuseport_listen.sh \
tcp_fastopen_backup_key.sh \
test_bpf.sh \
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
index 72dfbeaf56b9..e8031f68200a 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
@@ -414,6 +414,7 @@ vlmc_querier_intvl_test()
bridge vlan add vid 10 dev br1 self pvid untagged
ip link set dev $h1 master br1
ip link set dev br1 up
+ setup_wait_dev $h1 0
bridge vlan add vid 10 dev $h1 master
bridge vlan global set vid 10 dev br1 mcast_snooping 1 mcast_querier 1
sleep 2
diff --git a/tools/testing/selftests/net/netfilter/nf_queue.c b/tools/testing/selftests/net/netfilter/nf_queue.c
index 116c0ca0eabb..8bbec37f5356 100644
--- a/tools/testing/selftests/net/netfilter/nf_queue.c
+++ b/tools/testing/selftests/net/netfilter/nf_queue.c
@@ -19,6 +19,8 @@ struct options {
bool count_packets;
bool gso_enabled;
bool failopen;
+ bool out_of_order;
+ bool bogus_verdict;
int verbose;
unsigned int queue_num;
unsigned int timeout;
@@ -31,7 +33,7 @@ static struct options opts;
static void help(const char *p)
{
- printf("Usage: %s [-c|-v [-vv] ] [-o] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p);
+ printf("Usage: %s [-c|-v [-vv] ] [-o] [-O] [-b] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p);
}
static int parse_attr_cb(const struct nlattr *attr, void *data)
@@ -275,7 +277,9 @@ static int mainloop(void)
unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE;
struct mnl_socket *nl;
struct nlmsghdr *nlh;
+ uint32_t ooo_ids[16];
unsigned int portid;
+ int ooo_count = 0;
char *buf;
int ret;
@@ -308,6 +312,9 @@ static int mainloop(void)
ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL);
if (ret < 0) {
+ /* bogus verdict mode will generate ENOENT error messages */
+ if (opts.bogus_verdict && errno == ENOENT)
+ continue;
perror("mnl_cb_run");
exit(EXIT_FAILURE);
}
@@ -316,10 +323,35 @@ static int mainloop(void)
if (opts.delay_ms)
sleep_ms(opts.delay_ms);
- nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict);
- if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
- perror("mnl_socket_sendto");
- exit(EXIT_FAILURE);
+ if (opts.bogus_verdict) {
+ for (int i = 0; i < 50; i++) {
+ nlh = nfq_build_verdict(buf, id + 0x7FFFFFFF + i,
+ opts.queue_num, opts.verdict);
+ mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
+ }
+ }
+
+ if (opts.out_of_order) {
+ ooo_ids[ooo_count] = id;
+ if (ooo_count >= 15) {
+ for (ooo_count; ooo_count >= 0; ooo_count--) {
+ nlh = nfq_build_verdict(buf, ooo_ids[ooo_count],
+ opts.queue_num, opts.verdict);
+ if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+ perror("mnl_socket_sendto");
+ exit(EXIT_FAILURE);
+ }
+ }
+ ooo_count = 0;
+ } else {
+ ooo_count++;
+ }
+ } else {
+ nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict);
+ if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+ perror("mnl_socket_sendto");
+ exit(EXIT_FAILURE);
+ }
}
}
@@ -332,7 +364,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "chvot:q:Q:d:G")) != -1) {
+ while ((c = getopt(argc, argv, "chvoObt:q:Q:d:G")) != -1) {
switch (c) {
case 'c':
opts.count_packets = true;
@@ -375,6 +407,12 @@ static void parse_opts(int argc, char **argv)
case 'v':
opts.verbose++;
break;
+ case 'O':
+ opts.out_of_order = true;
+ break;
+ case 'b':
+ opts.bogus_verdict = true;
+ break;
}
}
diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
index ea766bdc5d04..d80390848e85 100755
--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
@@ -11,6 +11,7 @@ ret=0
timeout=5
SCTP_TEST_TIMEOUT=60
+STRESS_TEST_TIMEOUT=30
cleanup()
{
@@ -719,6 +720,74 @@ EOF
fi
}
+check_tainted()
+{
+ local msg="$1"
+
+ if [ "$tainted_then" -ne 0 ];then
+ return
+ fi
+
+ read tainted_now < /proc/sys/kernel/tainted
+ if [ "$tainted_now" -eq 0 ];then
+ echo "PASS: $msg"
+ else
+ echo "TAINT: $msg"
+ dmesg
+ ret=1
+ fi
+}
+
+test_queue_stress()
+{
+ read tainted_then < /proc/sys/kernel/tainted
+ local i
+
+ ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF
+flush ruleset
+table inet t {
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+
+ queue flags bypass to numgen random mod 8
+ }
+}
+EOF
+ timeout "$STRESS_TEST_TIMEOUT" ip netns exec "$ns2" \
+ socat -u UDP-LISTEN:12345,fork,pf=ipv4 STDOUT > /dev/null &
+
+ timeout "$STRESS_TEST_TIMEOUT" ip netns exec "$ns3" \
+ socat -u UDP-LISTEN:12345,fork,pf=ipv4 STDOUT > /dev/null &
+
+ for i in $(seq 0 7); do
+ ip netns exec "$nsrouter" timeout "$STRESS_TEST_TIMEOUT" \
+ ./nf_queue -q $i -t 2 -O -b > /dev/null &
+ done
+
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ ping -q -f 10.0.2.99 > /dev/null 2>&1 &
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ ping -q -f 10.0.3.99 > /dev/null 2>&1 &
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ ping -q -f "dead:2::99" > /dev/null 2>&1 &
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ ping -q -f "dead:3::99" > /dev/null 2>&1 &
+
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns2" 12345
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns3" 12345
+
+ for i in $(seq 1 4);do
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ socat -u STDIN UDP-DATAGRAM:10.0.2.99:12345 < /dev/zero > /dev/null &
+ ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \
+ socat -u STDIN UDP-DATAGRAM:10.0.3.99:12345 < /dev/zero > /dev/null &
+ done
+
+ wait
+
+ check_tainted "concurrent queueing"
+}
+
test_queue_removal()
{
read tainted_then < /proc/sys/kernel/tainted
@@ -742,18 +811,7 @@ EOF
ip netns exec "$ns1" nft flush ruleset
- if [ "$tainted_then" -ne 0 ];then
- return
- fi
-
- read tainted_now < /proc/sys/kernel/tainted
- if [ "$tainted_now" -eq 0 ];then
- echo "PASS: queue program exiting while packets queued"
- else
- echo "TAINT: queue program exiting while packets queued"
- dmesg
- ret=1
- fi
+ check_tainted "queue program exiting while packets queued"
}
ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -799,6 +857,7 @@ test_sctp_forward
test_sctp_output
test_udp_nat_race
test_udp_gro_ct
+test_queue_stress
# should be last, adds vrf device in ns1 and changes routes
test_icmp_vrf
diff --git a/tools/testing/selftests/net/srv6_iptunnel_cache.sh b/tools/testing/selftests/net/srv6_iptunnel_cache.sh
new file mode 100755
index 000000000000..62638ab679d9
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_iptunnel_cache.sh
@@ -0,0 +1,197 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+
+# This test verifies that the seg6 lwtunnel does not share the dst_cache
+# between the input (forwarding) and output (locally generated) paths.
+#
+# A shared dst_cache allows a forwarded packet to populate the cache and a
+# subsequent locally generated packet to silently reuse that entry, bypassing
+# its own route lookup. To expose this, the SID is made reachable only for
+# forwarded traffic (via an ip rule matching iif) and blackholed for everything
+# else. A local ping on ns_router must therefore always hit the blackhole;
+# if it succeeds after a forwarded packet has populated the cache, the
+# shared dst_cache bug is confirmed.
+#
+# Both forwarded and local packets are pinned to the same CPU with taskset,
+# since dst_cache is per-cpu.
+#
+# Topology:
+# +--------------------+ +--------------------+
+# | ns_src | | ns_dst |
+# | | | |
+# | veth-s0 | | veth-d0 |
+# | fd00::1/64 | | fd01::2/64 |
+# +-------+------------+ +----------+---------+
+# | |
+# | +--------------------+ |
+# | | ns_router | |
+# | | | |
+# +------------+ veth-r0 veth-r1 +--------------+
+# | fd00::2 fd01::1 |
+# +--------------------+
+#
+#
+# ns_router: encap (main table)
+# +---------+---------------------------------------+
+# | dst | action |
+# +---------+---------------------------------------+
+# | cafe::1 | encap seg6 mode encap segs fc00::100 |
+# +---------+---------------------------------------+
+#
+# ns_router: post-encap SID resolution
+# +-------+------------+----------------------------+
+# | table | dst | action |
+# +-------+------------+----------------------------+
+# | 100 | fc00::100 | via fd01::2 dev veth-r1 |
+# +-------+------------+----------------------------+
+# | main | fc00::100 | blackhole |
+# +-------+------------+----------------------------+
+#
+# ns_router: ip rule
+# +------------------+------------------------------+
+# | match | action |
+# +------------------+------------------------------+
+# | iif veth-r0 | lookup 100 |
+# +------------------+------------------------------+
+#
+# ns_dst: SRv6 decap (main table)
+# +--------------+----------------------------------+
+# | SID | action |
+# +--------------+----------------------------------+
+# | fc00::100 | End.DT6 table 255 (local) |
+# +--------------+----------------------------------+
+
+source lib.sh
+
+readonly SID="fc00::100"
+readonly DEST="cafe::1"
+
+readonly SRC_MAC="02:00:00:00:00:01"
+readonly RTR_R0_MAC="02:00:00:00:00:02"
+readonly RTR_R1_MAC="02:00:00:00:00:03"
+readonly DST_MAC="02:00:00:00:00:04"
+
+cleanup()
+{
+ cleanup_ns "${NS_SRC}" "${NS_RTR}" "${NS_DST}"
+}
+
+check_prerequisites()
+{
+ if ! command -v ip &>/dev/null; then
+ echo "SKIP: ip tool not found"
+ exit "${ksft_skip}"
+ fi
+
+ if ! command -v ping &>/dev/null; then
+ echo "SKIP: ping not found"
+ exit "${ksft_skip}"
+ fi
+
+ if ! command -v sysctl &>/dev/null; then
+ echo "SKIP: sysctl not found"
+ exit "${ksft_skip}"
+ fi
+
+ if ! command -v taskset &>/dev/null; then
+ echo "SKIP: taskset not found"
+ exit "${ksft_skip}"
+ fi
+}
+
+setup()
+{
+ setup_ns NS_SRC NS_RTR NS_DST
+
+ ip link add veth-s0 netns "${NS_SRC}" type veth \
+ peer name veth-r0 netns "${NS_RTR}"
+ ip link add veth-r1 netns "${NS_RTR}" type veth \
+ peer name veth-d0 netns "${NS_DST}"
+
+ ip -n "${NS_SRC}" link set veth-s0 address "${SRC_MAC}"
+ ip -n "${NS_RTR}" link set veth-r0 address "${RTR_R0_MAC}"
+ ip -n "${NS_RTR}" link set veth-r1 address "${RTR_R1_MAC}"
+ ip -n "${NS_DST}" link set veth-d0 address "${DST_MAC}"
+
+ # ns_src
+ ip -n "${NS_SRC}" link set veth-s0 up
+ ip -n "${NS_SRC}" addr add fd00::1/64 dev veth-s0 nodad
+ ip -n "${NS_SRC}" -6 route add "${DEST}"/128 via fd00::2
+
+ # ns_router
+ ip -n "${NS_RTR}" link set veth-r0 up
+ ip -n "${NS_RTR}" addr add fd00::2/64 dev veth-r0 nodad
+ ip -n "${NS_RTR}" link set veth-r1 up
+ ip -n "${NS_RTR}" addr add fd01::1/64 dev veth-r1 nodad
+ ip netns exec "${NS_RTR}" sysctl -qw net.ipv6.conf.all.forwarding=1
+
+ ip -n "${NS_RTR}" -6 route add "${DEST}"/128 \
+ encap seg6 mode encap segs "${SID}" dev veth-r0
+ ip -n "${NS_RTR}" -6 route add "${SID}"/128 table 100 \
+ via fd01::2 dev veth-r1
+ ip -n "${NS_RTR}" -6 route add blackhole "${SID}"/128
+ ip -n "${NS_RTR}" -6 rule add iif veth-r0 lookup 100
+
+ # ns_dst
+ ip -n "${NS_DST}" link set veth-d0 up
+ ip -n "${NS_DST}" addr add fd01::2/64 dev veth-d0 nodad
+ ip -n "${NS_DST}" addr add "${DEST}"/128 dev lo nodad
+ ip -n "${NS_DST}" -6 route add "${SID}"/128 \
+ encap seg6local action End.DT6 table 255 dev veth-d0
+ ip -n "${NS_DST}" -6 route add fd00::/64 via fd01::1
+
+ # static neighbors
+ ip -n "${NS_SRC}" -6 neigh add fd00::2 dev veth-s0 \
+ lladdr "${RTR_R0_MAC}" nud permanent
+ ip -n "${NS_RTR}" -6 neigh add fd00::1 dev veth-r0 \
+ lladdr "${SRC_MAC}" nud permanent
+ ip -n "${NS_RTR}" -6 neigh add fd01::2 dev veth-r1 \
+ lladdr "${DST_MAC}" nud permanent
+ ip -n "${NS_DST}" -6 neigh add fd01::1 dev veth-d0 \
+ lladdr "${RTR_R1_MAC}" nud permanent
+}
+
+test_cache_isolation()
+{
+ RET=0
+
+ # local ping with empty cache: must fail (SID is blackholed)
+ if ip netns exec "${NS_RTR}" taskset -c 0 \
+ ping -c 1 -W 2 "${DEST}" &>/dev/null; then
+ echo "SKIP: local ping succeeded, topology broken"
+ exit "${ksft_skip}"
+ fi
+
+ # forward from ns_src to populate the input cache
+ if ! ip netns exec "${NS_SRC}" taskset -c 0 \
+ ping -c 1 -W 2 "${DEST}" &>/dev/null; then
+ echo "SKIP: forwarded ping failed, topology broken"
+ exit "${ksft_skip}"
+ fi
+
+ # local ping again: must still fail; if the output path reuses
+ # the input cache, it bypasses the blackhole and the ping succeeds
+ if ip netns exec "${NS_RTR}" taskset -c 0 \
+ ping -c 1 -W 2 "${DEST}" &>/dev/null; then
+ echo "FAIL: output path used dst cached by input path"
+ RET="${ksft_fail}"
+ else
+ echo "PASS: output path dst_cache is independent"
+ fi
+
+ return "${RET}"
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+trap cleanup EXIT
+
+check_prerequisites
+setup
+test_cache_isolation
+exit "${RET}"
diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
index 3589549f7228..74b6f6bcf067 100644
--- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
+++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
@@ -290,10 +290,11 @@ TEST(ptrace_v_syscall_clobbering)
/* verify initial vsetvli settings */
- if (is_xtheadvector_supported())
+ if (is_xtheadvector_supported()) {
EXPECT_EQ(5UL, regset_data->vtype);
- else
+ } else {
EXPECT_EQ(9UL, regset_data->vtype);
+ }
EXPECT_EQ(regset_data->vlenb, regset_data->vl);
EXPECT_EQ(vlenb, regset_data->vlenb);
@@ -346,8 +347,8 @@ FIXTURE_TEARDOWN(v_csr_invalid)
{
}
-#define VECTOR_1_0 BIT(0)
-#define XTHEAD_VECTOR_0_7 BIT(1)
+#define VECTOR_1_0 _BITUL(0)
+#define XTHEAD_VECTOR_0_7 _BITUL(1)
#define vector_test(x) ((x) & VECTOR_1_0)
#define xthead_test(x) ((x) & XTHEAD_VECTOR_0_7)
@@ -619,10 +620,11 @@ TEST_F(v_csr_invalid, ptrace_v_invalid_values)
/* verify initial vsetvli settings */
- if (is_xtheadvector_supported())
+ if (is_xtheadvector_supported()) {
EXPECT_EQ(5UL, regset_data->vtype);
- else
+ } else {
EXPECT_EQ(9UL, regset_data->vtype);
+ }
EXPECT_EQ(regset_data->vlenb, regset_data->vl);
EXPECT_EQ(vlenb, regset_data->vlenb);
@@ -827,10 +829,11 @@ TEST_F(v_csr_valid, ptrace_v_valid_values)
/* verify initial vsetvli settings */
- if (is_xtheadvector_supported())
+ if (is_xtheadvector_supported()) {
EXPECT_EQ(5UL, regset_data->vtype);
- else
+ } else {
EXPECT_EQ(9UL, regset_data->vtype);
+ }
EXPECT_EQ(regset_data->vlenb, regset_data->vl);
EXPECT_EQ(vlenb, regset_data->vlenb);
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 9430ef5b8bc3..1fe1338c79cd 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -344,7 +344,9 @@ void send_buf(int fd, const void *buf, size_t len, int flags,
ret = send(fd, buf + nwritten, len - nwritten, flags);
timeout_check("send");
- if (ret == 0 || (ret < 0 && errno != EINTR))
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
break;
nwritten += ret;
@@ -396,7 +398,9 @@ void recv_buf(int fd, void *buf, size_t len, int flags, ssize_t expected_ret)
ret = recv(fd, buf + nread, len - nread, flags);
timeout_check("recv");
- if (ret == 0 || (ret < 0 && errno != EINTR))
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
break;
nread += ret;