author	Dmitry Safonov <0x7f454c46@gmail.com>	2025-03-19 06:13:37 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2025-03-25 16:10:30 +0300
commit	3f36781e57b37ad75c01f70e2e9a555efa3b03e5 (patch)
tree	e1f13989399983e964cffdbcf5c1fb9283725fe8 /tools/testing/selftests/net/tcp_ao/lib/sock.c
parent	5a0a3193f6c41381f545adda4725004f64142d41 (diff)
download	linux-3f36781e57b37ad75c01f70e2e9a555efa3b03e5.tar.xz
selftests/net: Add mixed select()+polling mode to TCP-AO tests
Currently, tcp_ao tests have two timeouts: TEST_RETRANSMIT_SEC and TEST_TIMEOUT_SEC [by default 1 and 5 seconds]. The first one, TEST_TIMEOUT_SEC, is used for operations that are expected to succeed in order for a test to pass. It is usually not consumed and exists only to avoid an indefinite test run if the operation doesn't complete. The second one, TEST_RETRANSMIT_SEC, exists for tests that check operations which are expected to fail/timeout. It is shorter, as it is fully consumed, with the expectation that if the operation didn't succeed within that period, it will time out, and the related test, which expects the timeout, passes. The actual operation failure is then cross-verified by other means, such as counter checks.

The issue with the TEST_RETRANSMIT_SEC timeout is that 1 second is exactly the initial TCP retransmission timeout. So, in case the initial segment gets lost (quite unlikely on a local veth interface between two net namespaces, yet it happens in slow VMs), the retransmission never happens and, as a result, the test is not actually testing the functionality, which in the end fails the counter checks.

As I want the tcp_ao selftests to be fast and to finish in a reasonable amount of time on a manual run, I didn't consider increasing TEST_RETRANSMIT_SEC. Rather, initially, BPF_SOCK_OPS_TIMEOUT_INIT looked promising as a lever to make the initial TCP timeout shorter. But as it is not a per-socket attached bpf, but sock_ops (which attaches to cgroups), the selftests would have to use libbpf, which I wanted to avoid if not absolutely required.

Instead, use a mixed select()-and-counters-polling mode with the longer TEST_TIMEOUT_SEC timeout to detect runaway failed tests. This not only allows segments to be lost and the test to still succeed after what used to be the TEST_RETRANSMIT_SEC budget, but also makes the tests that expect a timeout/failure pass faster. The only test case now taking longer (TEST_TIMEOUT_SEC) is connect-deny "wrong snd id", which checks for no key on SYN-ACK, for which there is no counter in the kernel (see tcp_make_synack()). Yet it can be sped up by poking skpair from the trace event (see trace_tcp_ao_synack_no_key).

Fixes: ed9d09b309b1 ("selftests/net: Add a test for TCP-AO keys matching")
Reported-by: Jakub Kicinski <kuba@kernel.org>
Closes: https://lore.kernel.org/netdev/20241205070656.6ef344d7@kernel.org/
Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Link: https://patch.msgid.link/20250319-tcp-ao-selftests-polling-v2-4-da48040153d1@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
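The essence of the mixed mode, as implemented by __test_skpair_poll() in the diff below: instead of a single select() that blocks for the whole timeout, wake up every 150 usec so that expected counter movement or a peer-reported error can end the wait early. A minimal standalone sketch of that loop; counters_moved() is an illustrative stub standing in for the patch's test_get_tcp_counters()/test_cmp_counters() pair, and the SO_ERROR/exception-fd handling of the real helper is elided:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/select.h>

#define POLL_USEC	150	/* wake-up period, same value as the patch */

/* Illustrative stub: a real test diffs the netns TCP-AO/MD5 counters
 * here and returns true once the expected ones have ticked. */
static bool counters_moved(int sk)
{
	(void)sk;
	return false;
}

static int mixed_wait(int sk, bool write, uint64_t timeout_sec,
		      volatile int *peer_err)
{
	uint64_t t;

	for (t = 0; t <= timeout_sec * 1000000; t += POLL_USEC) {
		struct timeval tv = { .tv_usec = POLL_USEC, };
		fd_set fds;
		int ret;

		FD_ZERO(&fds);
		FD_SET(sk, &fds);
		ret = select(sk + 1, write ? NULL : &fds,
			     write ? &fds : NULL, NULL, &tv);
		if (ret < 0)
			return -errno;
		if (ret > 0)
			return 0;	/* fd became ready: success path */
		/* select() timed out: poll the out-of-band conditions */
		if (counters_moved(sk))
			break;		/* expected failure already visible */
		if (peer_err && *peer_err)
			return *peer_err;	/* the paired thread gave up */
	}
	return -ETIMEDOUT;
}

A test that expects failure thus still gets -ETIMEDOUT, but as soon as the counters confirm the failure rather than after the full timeout.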
Diffstat (limited to 'tools/testing/selftests/net/tcp_ao/lib/sock.c')
-rw-r--r--	tools/testing/selftests/net/tcp_ao/lib/sock.c	| 162
1 file changed, 144 insertions(+), 18 deletions(-)
diff --git a/tools/testing/selftests/net/tcp_ao/lib/sock.c b/tools/testing/selftests/net/tcp_ao/lib/sock.c
index 7ffde4dd7942..1befaf0aa98f 100644
--- a/tools/testing/selftests/net/tcp_ao/lib/sock.c
+++ b/tools/testing/selftests/net/tcp_ao/lib/sock.c
@@ -34,10 +34,8 @@ int __test_listen_socket(int backlog, void *addr, size_t addr_sz)
return sk;
}
-int test_wait_fd(int sk, time_t sec, bool write)
+static int __test_wait_fd(int sk, struct timeval *tv, bool write)
{
- struct timeval tv = { .tv_sec = sec };
- struct timeval *ptv = NULL;
fd_set fds, efds;
int ret;
socklen_t slen = sizeof(ret);
@@ -47,14 +45,11 @@ int test_wait_fd(int sk, time_t sec, bool write)
FD_ZERO(&efds);
FD_SET(sk, &efds);
- if (sec)
- ptv = &tv;
-
errno = 0;
if (write)
- ret = select(sk + 1, NULL, &fds, &efds, ptv);
+ ret = select(sk + 1, NULL, &fds, &efds, tv);
else
- ret = select(sk + 1, &fds, NULL, &efds, ptv);
+ ret = select(sk + 1, &fds, NULL, &efds, tv);
if (ret < 0)
return -errno;
if (ret == 0) {
@@ -69,6 +64,52 @@ int test_wait_fd(int sk, time_t sec, bool write)
return 0;
}
+int test_wait_fd(int sk, time_t sec, bool write)
+{
+ struct timeval tv = { .tv_sec = sec, };
+
+ return __test_wait_fd(sk, sec ? &tv : NULL, write);
+}
+
+static bool __skpair_poll_should_stop(int sk, struct tcp_counters *c,
+ test_cnt condition)
+{
+ struct tcp_counters c2;
+ test_cnt diff;
+
+ if (test_get_tcp_counters(sk, &c2))
+ test_error("test_get_tcp_counters()");
+
+ diff = test_cmp_counters(c, &c2);
+ test_tcp_counters_free(&c2);
+ return (diff & condition) == condition;
+}
+
+/* How often wake up and check netns counters & paired (*err) */
+#define POLL_USEC 150
+static int __test_skpair_poll(int sk, bool write, uint64_t timeout,
+ struct tcp_counters *c, test_cnt cond,
+ volatile int *err)
+{
+ uint64_t t;
+
+ for (t = 0; t <= timeout * 1000000; t += POLL_USEC) {
+ struct timeval tv = { .tv_usec = POLL_USEC, };
+ int ret;
+
+ ret = __test_wait_fd(sk, &tv, write);
+ if (ret != -ETIMEDOUT)
+ return ret;
+ if (c && cond && __skpair_poll_should_stop(sk, c, cond))
+ break;
+ if (err && *err)
+ return *err;
+ }
+ if (err)
+ *err = -ETIMEDOUT;
+ return -ETIMEDOUT;
+}
+
int __test_connect_socket(int sk, const char *device,
void *addr, size_t addr_sz, time_t timeout)
{
@@ -113,6 +154,43 @@ out:
return err;
}
+int test_skpair_wait_poll(int sk, bool write,
+ test_cnt cond, volatile int *err)
+{
+ struct tcp_counters c;
+ int ret;
+
+ *err = 0;
+ if (test_get_tcp_counters(sk, &c))
+ test_error("test_get_tcp_counters()");
+ synchronize_threads(); /* 1: init skpair & read nscounters */
+
+ ret = __test_skpair_poll(sk, write, TEST_TIMEOUT_SEC, &c, cond, err);
+ test_tcp_counters_free(&c);
+ return ret;
+}
+
+int _test_skpair_connect_poll(int sk, const char *device,
+ void *addr, size_t addr_sz,
+ test_cnt condition, volatile int *err)
+{
+ struct tcp_counters c;
+ int ret;
+
+ *err = 0;
+ if (test_get_tcp_counters(sk, &c))
+ test_error("test_get_tcp_counters()");
+ synchronize_threads(); /* 1: init skpair & read nscounters */
+ ret = __test_connect_socket(sk, device, addr, addr_sz, -1);
+ if (ret < 0) {
+ test_tcp_counters_free(&c);
+ return (*err = ret);
+ }
+ ret = __test_skpair_poll(sk, 1, TEST_TIMEOUT_SEC, &c, condition, err);
+ test_tcp_counters_free(&c);
+ return ret;
+}
+
int __test_set_md5(int sk, void *addr, size_t addr_sz, uint8_t prefix,
int vrf, const char *password)
{
@@ -515,7 +593,9 @@ void test_tcp_counters_free(struct tcp_counters *cnts)
}
#define TEST_BUF_SIZE 4096
-ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec)
+static ssize_t _test_server_run(int sk, ssize_t quota, struct tcp_counters *c,
+ test_cnt cond, volatile int *err,
+ time_t timeout_sec)
{
ssize_t total = 0;
@@ -524,7 +604,7 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec)
ssize_t bytes, sent;
int ret;
- ret = test_wait_fd(sk, timeout_sec, 0);
+ ret = __test_skpair_poll(sk, 0, timeout_sec, c, cond, err);
if (ret)
return ret;
@@ -535,7 +615,7 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec)
if (bytes == 0)
break;
- ret = test_wait_fd(sk, timeout_sec, 1);
+ ret = __test_skpair_poll(sk, 1, timeout_sec, c, cond, err);
if (ret)
return ret;
@@ -550,13 +630,41 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec)
return total;
}
-ssize_t test_client_loop(int sk, char *buf, size_t buf_sz,
- const size_t msg_len, time_t timeout_sec)
+ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec)
+{
+ return _test_server_run(sk, quota, NULL, 0, NULL,
+ timeout_sec ?: TEST_TIMEOUT_SEC);
+}
+
+int test_skpair_server(int sk, ssize_t quota, test_cnt cond, volatile int *err)
+{
+ struct tcp_counters c;
+ ssize_t ret;
+
+ *err = 0;
+ if (test_get_tcp_counters(sk, &c))
+ test_error("test_get_tcp_counters()");
+ synchronize_threads(); /* 1: init skpair & read nscounters */
+
+ ret = _test_server_run(sk, quota, &c, cond, err, TEST_TIMEOUT_SEC);
+ test_tcp_counters_free(&c);
+ return ret;
+}
+
+static ssize_t test_client_loop(int sk, size_t buf_sz, const size_t msg_len,
+ struct tcp_counters *c, test_cnt cond,
+ volatile int *err, time_t timeout_sec)
{
char msg[msg_len];
int nodelay = 1;
+ char *buf;
size_t i;
+ buf = alloca(buf_sz);
+ if (!buf)
+ return -ENOMEM;
+ randomize_buffer(buf, buf_sz);
+
if (setsockopt(sk, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay)))
test_error("setsockopt(TCP_NODELAY)");
@@ -564,7 +672,7 @@ ssize_t test_client_loop(int sk, char *buf, size_t buf_sz,
size_t sent, bytes = min(msg_len, buf_sz - i);
int ret;
- ret = test_wait_fd(sk, timeout_sec, 1);
+ ret = __test_skpair_poll(sk, 1, timeout_sec, c, cond, err);
if (ret)
return ret;
@@ -578,7 +686,7 @@ ssize_t test_client_loop(int sk, char *buf, size_t buf_sz,
do {
ssize_t got;
- ret = test_wait_fd(sk, timeout_sec, 0);
+ ret = __test_skpair_poll(sk, 0, timeout_sec, c, cond, err);
if (ret)
return ret;
@@ -601,11 +709,29 @@ int test_client_verify(int sk, const size_t msg_len, const size_t nr,
time_t timeout_sec)
{
size_t buf_sz = msg_len * nr;
- char *buf = alloca(buf_sz);
ssize_t ret;
- randomize_buffer(buf, buf_sz);
- ret = test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec);
+ ret = test_client_loop(sk, buf_sz, msg_len, NULL, 0, NULL, timeout_sec);
+ if (ret < 0)
+ return (int)ret;
+ return ret != buf_sz ? -1 : 0;
+}
+
+int test_skpair_client(int sk, const size_t msg_len, const size_t nr,
+ test_cnt cond, volatile int *err)
+{
+ struct tcp_counters c;
+ size_t buf_sz = msg_len * nr;
+ ssize_t ret;
+
+ *err = 0;
+ if (test_get_tcp_counters(sk, &c))
+ test_error("test_get_tcp_counters()");
+ synchronize_threads(); /* 1: init skpair & read nscounters */
+
+ ret = test_client_loop(sk, buf_sz, msg_len, &c, cond, err,
+ TEST_TIMEOUT_SEC);
+ test_tcp_counters_free(&c);
if (ret < 0)
return (int)ret;
return ret != buf_sz ? -1 : 0;
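From a test's point of view, the new helpers replace the fixed, fully-consumed wait. A hedged usage sketch for a connect-deny style check: the include and the test_skpair_wait_poll()/test_fail() calls follow the tcp_ao lib as shown in this series, while the function itself, the shared skpair_err, and the choice of TEST_CNT_BAD as the expected counter condition are illustrative assumptions, not code from the patch:

#include "aolib.h"	/* tcp_ao selftest library header used by the tests */

static volatile int skpair_err;	/* shared with the paired server thread */

static void check_connect_denied(int sk)
{
	int ret;

	/* The helper synchronizes with the peer thread, then polls.
	 * It returns -ETIMEDOUT both on a genuine timeout and as soon
	 * as the expected counters tick, so a failure-expecting test
	 * no longer has to burn the whole timeout before passing. */
	ret = test_skpair_wait_poll(sk, 1, TEST_CNT_BAD, &skpair_err);
	if (ret != -ETIMEDOUT)
		test_fail("connect() was not denied: %d", ret);
	/* The verdict is still cross-verified by counter checks. */
}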