summaryrefslogtreecommitdiff
path: root/tools/testing
diff options
context:
space:
mode:
author	Eric Dumazet <edumazet@google.com>	2026-03-24 02:49:20 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2026-03-25 07:00:38 +0300
commit	d1e59a46973719e458bec78d00dd767d7a7ba71f (patch)
tree	43fa8dcafd5334b6373a93d77d56ee0f2a1e464f /tools/testing
parent	112f4c6320070b19e7d49cba758400adc279e377 (diff)
download	linux-d1e59a46973719e458bec78d00dd767d7a7ba71f.tar.xz
tcp: add cwnd_event_tx_start to tcp_congestion_ops
(tcp_congestion_ops)->cwnd_event() is called very often, with @event
oscillating between CA_EVENT_TX_START and other values.

This is not branch prediction friendly.

Provide a new cwnd_event_tx_start pointer dedicated for CA_EVENT_TX_START.

Both BBR and CUBIC benefit from this change, since they only care about
CA_EVENT_TX_START.

No change in kernel size:

$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
add/remove: 4/4 grow/shrink: 3/1 up/down: 564/-568 (-4)
Function                                     old     new   delta
bbr_cwnd_event_tx_start                        -     450    +450
cubictcp_cwnd_event_tx_start                   -      70     +70
__pfx_cubictcp_cwnd_event_tx_start             -      16     +16
__pfx_bbr_cwnd_event_tx_start                  -      16     +16
tcp_unregister_congestion_control             93      99      +6
tcp_update_congestion_control                518     521      +3
tcp_register_congestion_control              422     425      +3
__tcp_transmit_skb                          3308    3306      -2
__pfx_cubictcp_cwnd_event                     16       -     -16
__pfx_bbr_cwnd_event                          16       -     -16
cubictcp_cwnd_event                           80       -     -80
bbr_cwnd_event                               454       -    -454
Total: Before=25240512, After=25240508, chg -0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260323234920.1097858-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'tools/testing')
-rw-r--r--	tools/testing/selftests/bpf/progs/bpf_cc_cubic.c	| 8
-rw-r--r--	tools/testing/selftests/bpf/progs/bpf_cubic.c	| 33
-rw-r--r--	tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c	| 16
3 files changed, 31 insertions(+), 26 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
index 9af19dfe4e80..bccf677b94b6 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
@@ -23,7 +23,7 @@
#define TCP_REORDERING (12)
extern void cubictcp_init(struct sock *sk) __ksym;
-extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern void cubictcp_cwnd_event_tx_start(struct sock *sk) __ksym;
extern __u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
extern void cubictcp_state(struct sock *sk, __u8 new_state) __ksym;
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
@@ -108,9 +108,9 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+void BPF_PROG(bpf_cubic_cwnd_event_tx_start, struct sock *sk)
{
- cubictcp_cwnd_event(sk, event);
+ cubictcp_cwnd_event_tx_start(sk);
}
SEC("struct_ops")
@@ -172,7 +172,7 @@ struct tcp_congestion_ops cc_cubic = {
.cong_control = (void *)bpf_cubic_cong_control,
.set_state = (void *)bpf_cubic_state,
.undo_cwnd = (void *)bpf_cubic_undo_cwnd,
- .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .cwnd_event_tx_start = (void *)bpf_cubic_cwnd_event_tx_start,
.pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cc_cubic",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index 46fb2b37d3a7..ce18a4db813f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -185,24 +185,21 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk)
}
SEC("struct_ops")
-void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+void BPF_PROG(bpf_cubic_cwnd_event_tx_start, struct sock *sk)
{
- if (event == CA_EVENT_TX_START) {
- struct bpf_bictcp *ca = inet_csk_ca(sk);
- __u32 now = tcp_jiffies32;
- __s32 delta;
-
- delta = now - tcp_sk(sk)->lsndtime;
-
- /* We were application limited (idle) for a while.
- * Shift epoch_start to keep cwnd growth to cubic curve.
- */
- if (ca->epoch_start && delta > 0) {
- ca->epoch_start += delta;
- if (after(ca->epoch_start, now))
- ca->epoch_start = now;
- }
- return;
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
+ __u32 now = tcp_jiffies32;
+ __s32 delta;
+
+ delta = now - tcp_sk(sk)->lsndtime;
+
+ /* We were application limited (idle) for a while.
+ * Shift epoch_start to keep cwnd growth to cubic curve.
+ */
+ if (ca->epoch_start && delta > 0) {
+ ca->epoch_start += delta;
+ if (after(ca->epoch_start, now))
+ ca->epoch_start = now;
}
}
@@ -537,7 +534,7 @@ struct tcp_congestion_ops cubic = {
.cong_avoid = (void *)bpf_cubic_cong_avoid,
.set_state = (void *)bpf_cubic_state,
.undo_cwnd = (void *)bpf_cubic_undo_cwnd,
- .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .cwnd_event_tx_start = (void *)bpf_cubic_cwnd_event_tx_start,
.pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cubic",
};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
index f95862f570b7..0a3e9d35bf6f 100644
--- a/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
@@ -8,7 +8,7 @@ extern void bbr_init(struct sock *sk) __ksym;
extern void bbr_main(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs) __ksym;
extern u32 bbr_sndbuf_expand(struct sock *sk) __ksym;
extern u32 bbr_undo_cwnd(struct sock *sk) __ksym;
-extern void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern void bbr_cwnd_event_tx_start(struct sock *sk) __ksym;
extern u32 bbr_ssthresh(struct sock *sk) __ksym;
extern u32 bbr_min_tso_segs(struct sock *sk) __ksym;
extern void bbr_set_state(struct sock *sk, u8 new_state) __ksym;
@@ -16,6 +16,7 @@ extern void bbr_set_state(struct sock *sk, u8 new_state) __ksym;
extern void dctcp_init(struct sock *sk) __ksym;
extern void dctcp_update_alpha(struct sock *sk, u32 flags) __ksym;
extern void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) __ksym;
+extern void dctcp_cwnd_event_tx_start(struct sock *sk) __ksym;
extern u32 dctcp_ssthresh(struct sock *sk) __ksym;
extern u32 dctcp_cwnd_undo(struct sock *sk) __ksym;
extern void dctcp_state(struct sock *sk, u8 new_state) __ksym;
@@ -24,7 +25,7 @@ extern void cubictcp_init(struct sock *sk) __ksym;
extern u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
extern void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) __ksym;
extern void cubictcp_state(struct sock *sk, u8 new_state) __ksym;
-extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern void cubictcp_cwnd_event_tx_start(struct sock *sk) __ksym;
extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
SEC("struct_ops")
@@ -69,9 +70,15 @@ u32 BPF_PROG(undo_cwnd, struct sock *sk)
SEC("struct_ops")
void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
- bbr_cwnd_event(sk, event);
dctcp_cwnd_event(sk, event);
- cubictcp_cwnd_event(sk, event);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cwnd_event_tx_start, struct sock *sk)
+{
+ bbr_cwnd_event_tx_start(sk);
+ dctcp_cwnd_event_tx_start(sk);
+ cubictcp_cwnd_event_tx_start(sk);
}
SEC("struct_ops")
@@ -111,6 +118,7 @@ struct tcp_congestion_ops tcp_ca_kfunc = {
.sndbuf_expand = (void *)sndbuf_expand,
.undo_cwnd = (void *)undo_cwnd,
.cwnd_event = (void *)cwnd_event,
+ .cwnd_event_tx_start = (void *)cwnd_event_tx_start,
.ssthresh = (void *)ssthresh,
.min_tso_segs = (void *)min_tso_segs,
.set_state = (void *)set_state,