diff options
| author | Jakub Kicinski <kuba@kernel.org> | 2026-04-18 21:10:15 +0300 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-04-18 21:10:16 +0300 |
| commit | f3a0e90d4deb6386b90a9d5233028259c441cbc1 (patch) | |
| tree | c01a7b3b1b7bee880edd6529ab29cab2c75cdd69 /include | |
| parent | 82c21069028c5db3463f851ae8ac9cc2e38a3827 (diff) | |
| parent | 9e89b9d03a2d2e30dcca166d5af52f9a8eceab25 (diff) | |
| download | linux-f3a0e90d4deb6386b90a9d5233028259c441cbc1.tar.xz | |
Merge branch 'tcp-take-care-of-tcp_get_timestamping_opt_stats-races'
Eric Dumazet says:
====================
tcp: take care of tcp_get_timestamping_opt_stats() races
tcp_get_timestamping_opt_stats() does not own the socket lock,
this is intentional.
It calls tcp_get_info_chrono_stats() while other threads could
change chrono fields in tcp_chrono_set(). It also reads many
TCP socket fields that can be modified by other CPUs/threads.
I do not think we need a coherent TCP socket state snapshot
in tcp_get_timestamping_opt_stats().
Add READ_ONCE()/WRITE_ONCE() or data_race() annotations.
Note that icsk_ca_state is a bitfield, thus not covered
in this series.
====================
Link: https://patch.msgid.link/20260416200319.3608680-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/net/tcp.h | 12 | ||||
| -rw-r--r-- | include/net/tcp_ecn.h | 2 |
2 files changed, 9 insertions, 5 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h index dfa52ceefd23..ecbadcb3a744 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1513,7 +1513,7 @@ static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp) static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val) { WARN_ON_ONCE((int)val <= 0); - tp->snd_cwnd = val; + WRITE_ONCE(tp->snd_cwnd, val); } static inline bool tcp_in_slow_start(const struct tcp_sock *tp) @@ -2208,10 +2208,14 @@ static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new const u32 now = tcp_jiffies32; enum tcp_chrono old = tp->chrono_type; + /* Following WRITE_ONCE()s pair with READ_ONCE()s in + * tcp_get_info_chrono_stats(). + */ if (old > TCP_CHRONO_UNSPEC) - tp->chrono_stat[old - 1] += now - tp->chrono_start; - tp->chrono_start = now; - tp->chrono_type = new; + WRITE_ONCE(tp->chrono_stat[old - 1], + tp->chrono_stat[old - 1] + now - tp->chrono_start); + WRITE_ONCE(tp->chrono_start, now); + WRITE_ONCE(tp->chrono_type, new); } static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h index e9a933641636..865d5c5a7718 100644 --- a/include/net/tcp_ecn.h +++ b/include/net/tcp_ecn.h @@ -181,7 +181,7 @@ static inline void tcp_accecn_third_ack(struct sock *sk, tcp_accecn_validate_syn_feedback(sk, ace, sent_ect)) { if ((tcp_accecn_extract_syn_ect(ace) == INET_ECN_CE) && !tp->delivered_ce) - tp->delivered_ce++; + WRITE_ONCE(tp->delivered_ce, 1); } break; } |
