author     Paolo Abeni <pabeni@redhat.com>     2025-11-21 20:02:04 +0300
committer  Jakub Kicinski <kuba@kernel.org>    2025-11-25 06:49:41 +0300
commit     85f22b8e1e9db00a7003bf3c0e0a60d19718e083 (patch)
tree       4320af5ad8a2f32f3d0c29f676363a5ff5fe601e
parent     68c7c386714512219e3517ec1097d164aee102fb (diff)
download   linux-85f22b8e1e9db00a7003bf3c0e0a60d19718e083.tar.xz
mptcp: cleanup fallback data fin reception
MPTCP currently generates a dummy data_fin for fallback sockets when the
fallback subflow has completed data reception, using the current ack_seq.

We are going to introduce backlog usage for the msk soon, even for
fallback sockets: the ack_seq value will not match the most recent
sequence number seen by the fallback subflow socket, as it will ignore
data_seq values sitting in the backlog.

Instead, use the last map sequence number to set the data_fin, as
fallback (dummy) map sequences are always in sequence.

Reviewed-by: Geliang Tang <geliang@kernel.org>
Tested-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20251121-net-next-mptcp-memcg-backlog-imp-v1-5-1f34b6c1e0b1@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
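The reasoning above can be illustrated with a small standalone C sketch. This is
not kernel code: the structs and field names (fallback_subflow, fallback_msk)
are simplified stand-ins for the relevant mptcp_subflow_context and mptcp_sock
fields, and the sequence numbers are made up. It only shows why the end of the
last dummy mapping, map_seq + map_data_len, remains a correct data_fin sequence
when ack_seq lags behind data still queued in the backlog.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the subflow context of a fallback subflow. */
struct fallback_subflow {
	uint64_t map_seq;      /* MPTCP-level start of the current dummy mapping */
	uint32_t map_data_len; /* length covered by that mapping */
};

/* Simplified stand-in for the msk side. */
struct fallback_msk {
	uint64_t ack_seq;      /* may lag while data sits in the backlog */
};

/* Dummy data_fin sequence: end of the last in-sequence fallback mapping. */
static uint64_t fallback_data_fin_seq(const struct fallback_subflow *subflow)
{
	return subflow->map_seq + (uint64_t)subflow->map_data_len;
}

int main(void)
{
	/* 500 bytes mapped starting at MPTCP seq 1000; 300 of them are
	 * still in the backlog, so ack_seq has only reached 1200.
	 */
	struct fallback_subflow subflow = { .map_seq = 1000, .map_data_len = 500 };
	struct fallback_msk msk = { .ack_seq = 1200 };

	printf("ack_seq=%llu (stale), data_fin=%llu (end of last mapping)\n",
	       (unsigned long long)msk.ack_seq,
	       (unsigned long long)fallback_data_fin_seq(&subflow));
	return 0;
}

With these example numbers, the old ack_seq-based value (1200) would set the
data_fin too low, while the map-based value (1500) always points just past the
last byte covered by the fallback mapping, regardless of how much data is still
sitting in the backlog.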
-rw-r--r--  net/mptcp/subflow.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 72b7efe388db..1f7311afd48d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1285,6 +1285,7 @@ static bool subflow_is_done(const struct sock *sk)
 /* sched mptcp worker for subflow cleanup if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
+	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	struct sock *sk = (struct sock *)msk;
 
 	if (likely(ssk->sk_state != TCP_CLOSE &&
@@ -1303,7 +1304,8 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 	 */
 	if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
 	    msk->first == ssk &&
-	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
+	    mptcp_update_rcv_data_fin(msk, subflow->map_seq +
+					   subflow->map_data_len, true))
 		mptcp_schedule_work(sk);
 }