author	Matthieu Baerts (NGI0) <matttbe@kernel.org>	2024-07-27 13:01:23 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-08-11 13:47:25 +0300
commit	add243b7f6ceab7ad8e6e4267dc63da1a208dcd9 (patch)
tree	e36900b09b4f6abb1f16cbfdb09ef2aa95681e15
parent	da72e783afd27d9f487836b2e6738146c0edd149 (diff)
mptcp: sched: check both directions for backup
commit b6a66e521a2032f7fcba2af5a9bcbaeaa19b7ca3 upstream.

The 'mptcp_subflow_context' structure has two items related to the
backup flags:

 - 'backup': the subflow has been marked as backup by the other peer

 - 'request_bkup': the backup flag has been set by the host

Before this patch, the scheduler was only looking at the 'backup'
flag. That can make sense in some cases, but it looks like that's not
what we wanted for the general use, because either the path-manager
was setting both of them when sending an MP_PRIO, or the receiver was
duplicating the 'backup' flag in the subflow request.

Note that the use of these two flags in the path-manager is going to
be fixed in the next commits, but this change here is needed not to
modify the behaviour.

Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
Cc: stable@vger.kernel.org
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
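To make the intent concrete, here is a minimal user-space sketch (not
the kernel code; the struct and function names below are hypothetical
stand-ins for 'mptcp_subflow_context') of the combined check the
scheduler switches to with this patch:

	/* Build and run with: cc -o bkup bkup.c && ./bkup */
	#include <stdbool.h>
	#include <stdio.h>

	struct subflow_model {
		bool backup;       /* marked as backup by the other peer */
		bool request_bkup; /* backup requested by this host */
	};

	/* A subflow is a backup if EITHER side flagged it. */
	static bool subflow_is_backup(const struct subflow_model *sf)
	{
		return sf->backup || sf->request_bkup;
	}

	int main(void)
	{
		/* Locally requested backup, not (yet) echoed by the peer:
		 * before the patch only .backup was checked, so this
		 * subflow was still scheduled as if it were active. */
		struct subflow_model sf = { .backup = false,
					    .request_bkup = true };

		printf("backup? %s\n", subflow_is_backup(&sf) ? "yes" : "no");
		return 0;
	}

The OR keeps the change minimal: a subflow is demoted to backup as
soon as either direction asks for it, without touching how the two
flags are set.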
-rw-r--r--	include/trace/events/mptcp.h	2
-rw-r--r--	net/mptcp/protocol.c	10
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 563e48617374..54e8fb5a229c 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
 		struct sock *ssk;
 
 		__entry->active = mptcp_subflow_active(subflow);
-		__entry->backup = subflow->backup;
+		__entry->backup = subflow->backup || subflow->request_bkup;
 
 		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
 			__entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 973290d08e79..490b6e396106 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1417,13 +1417,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	}
 
 	mptcp_for_each_subflow(msk, subflow) {
+		bool backup = subflow->backup || subflow->request_bkup;
+
 		trace_mptcp_subflow_get_send(subflow);
 		ssk = mptcp_subflow_tcp_sock(subflow);
 		if (!mptcp_subflow_active(subflow))
 			continue;
 
 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
-		nr_active += !subflow->backup;
+		nr_active += !backup;
 		pace = subflow->avg_pacing_rate;
 		if (unlikely(!pace)) {
 			/* init pacing rate from socket */
@@ -1434,9 +1436,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 		}
 
 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
-		if (linger_time < send_info[subflow->backup].linger_time) {
-			send_info[subflow->backup].ssk = ssk;
-			send_info[subflow->backup].linger_time = linger_time;
+		if (linger_time < send_info[backup].linger_time) {
+			send_info[backup].ssk = ssk;
+			send_info[backup].linger_time = linger_time;
 		}
 	}
 	__mptcp_set_timeout(sk, tout);
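A note on the send_info[] indexing the patch relies on: the boolean
backup flag doubles as the array index, so slot 0 tracks the best
active subflow and slot 1 the best backup, each keyed by the smallest
estimated linger time. In the upstream function, as far as I can tell,
the backup slot is only consulted when no active subflow remains
(nr_active == 0). A standalone sketch of that two-bucket selection,
with hypothetical names in place of the kernel types:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct send_slot {
		void *ssk;
		uint64_t linger_time;
	};

	/* Record a subflow in the active (0) or backup (1) bucket if it
	 * has the smallest linger time seen so far in that bucket; the
	 * bool converts to 0 or 1 and selects the slot directly. */
	static void note_subflow(struct send_slot send_info[2], void *ssk,
				 uint64_t linger_time, bool backup)
	{
		if (linger_time < send_info[backup].linger_time) {
			send_info[backup].ssk = ssk;
			send_info[backup].linger_time = linger_time;
		}
	}

	int main(void)
	{
		struct send_slot send_info[2] = {
			{ NULL, UINT64_MAX }, { NULL, UINT64_MAX },
		};
		int active_sock = 1, backup_sock = 2; /* stand-in sockets */

		note_subflow(send_info, &active_sock, 100, false);
		note_subflow(send_info, &backup_sock, 50, true);

		printf("active: %s, backup: %s\n",
		       send_info[0].ssk ? "filled" : "empty",
		       send_info[1].ssk ? "filled" : "empty");
		return 0;
	}

Keeping the two buckets separate is what lets the fix stay local: only
the bucket a subflow lands in changes, not the fallback logic that
picks between them.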