author     Paolo Abeni <pabeni@redhat.com>                   2023-03-09 17:49:58 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2023-03-22 15:38:05 +0300
commit     bfbf356009a387e757e5affbaadf3cb3ff7cde32 (patch)
tree       9f75d5557b0fa3c1e193d0d69c1e602aa4e7fb9d /net
parent     53091aa34602de12808cad173d5876342c382788 (diff)
download   linux-bfbf356009a387e757e5affbaadf3cb3ff7cde32.tar.xz
mptcp: refactor passive socket initialization
commit 3a236aef280ed5122b2d47087eb514d0921ae033 upstream.

After commit 30e51b923e43 ("mptcp: fix unreleased socket in accept queue") unaccepted msk sockets go through a complete shutdown, so we no longer need to delay inserting the first subflow into the subflow lists.

The reference counting deserves some extra care, as __mptcp_close() is unaware of the request socket's linkage to the first subflow.

Please note that this is more of a refactoring than a fix, but since this modification is needed by the corrections in the following commits, a Fixes tag has been added here to help the stable team.

Fixes: 30e51b923e43 ("mptcp: fix unreleased socket in accept queue")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Tested-by: Christoph Paasch <cpaasch@apple.com>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
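To make the reference-counting rule behind this change concrete, below is a toy userspace C model, not kernel code; names such as msk_link_first(), subflow_put() and the struct layouts are invented for illustration only. It sketches the pattern the patched subflow_syn_recv_sock() follows in the diff below: whoever links a subflow into the msk's conn_list takes its own reference (sock_hold() in the real patch), so the close path can drop exactly one reference per list entry without knowing about the request socket's separate reference to the first subflow.

/*
 * Toy userspace model -- NOT kernel code. All identifiers here are
 * invented for illustration of the refcount-per-list-link pattern.
 */
#include <stdio.h>
#include <stdlib.h>

struct subflow {
	int refcnt;
	struct subflow *next;		/* next entry in the msk's conn_list */
};

struct msk {
	struct subflow *conn_list;	/* singly linked list of subflows */
	struct subflow *first;		/* first (MPC) subflow */
};

static struct subflow *subflow_create(void)
{
	struct subflow *sf = calloc(1, sizeof(*sf));

	sf->refcnt = 1;			/* creator's reference */
	return sf;
}

static void subflow_put(struct subflow *sf)
{
	if (--sf->refcnt == 0) {
		printf("subflow %p freed\n", (void *)sf);
		free(sf);
	}
}

/* Analogous to the patched code: link the first subflow at creation time
 * and take an extra reference (sock_hold() in the real patch) that is
 * owned by conn_list.
 */
static void msk_link_first(struct msk *m, struct subflow *sf)
{
	m->first = sf;
	sf->next = m->conn_list;
	m->conn_list = sf;
	sf->refcnt++;			/* conn_list's reference */
}

/* Analogous to the msk close path: walk conn_list and drop one reference
 * per entry, unaware of any other reference holders (e.g. the request
 * socket that still points at the first subflow).
 */
static void msk_close(struct msk *m)
{
	struct subflow *sf = m->conn_list, *next;

	for (; sf; sf = next) {
		next = sf->next;
		subflow_put(sf);	/* drop only the list's reference */
	}
	m->conn_list = NULL;
}

int main(void)
{
	struct msk m = { 0 };
	struct subflow *first = subflow_create();

	msk_link_first(&m, first);	/* refcnt: 1 -> 2 */
	msk_close(&m);			/* refcnt: 2 -> 1, still alive */
	subflow_put(first);		/* creator drops its ref, now freed */
	return 0;
}

Built with any C compiler, the subflow is freed only after both the list's reference and the creator's reference are dropped, which is the invariant the list_add()/sock_hold() pair added to subflow_syn_recv_sock() preserves for __mptcp_close().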
Diffstat (limited to 'net')
-rw-r--r--  net/mptcp/protocol.c  17
-rw-r--r--  net/mptcp/subflow.c   27
2 files changed, 21 insertions(+), 23 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index bc6c1f62a690..38db028826e9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_socket && !ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
- mptcp_propagate_sndbuf((struct sock *)msk, ssk);
mptcp_sockopt_sync_locked(msk, ssk);
return true;
}
@@ -3699,22 +3698,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
lock_sock(newsk);
- /* PM/worker can now acquire the first subflow socket
- * lock without racing with listener queue cleanup,
- * we can notify it, if needed.
- *
- * Even if remote has reset the initial subflow by now
- * the refcnt is still at least one.
- */
- subflow = mptcp_subflow_ctx(msk->first);
- list_add(&subflow->node, &msk->conn_list);
- sock_hold(msk->first);
- if (mptcp_is_fully_established(newsk))
- mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
-
- mptcp_rcv_space_init(msk, msk->first);
- mptcp_propagate_sndbuf(newsk, msk->first);
-
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 55d6161170ef..1eb97ef5d350 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -396,6 +396,12 @@ void mptcp_subflow_reset(struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
+ /* mptcp_mp_fail_no_response() can reach here on an already closed
+ * socket
+ */
+ if (ssk->sk_state == TCP_CLOSE)
+ return;
+
/* must hold: tcp_done() could drop last reference on parent */
sock_hold(sk);
@@ -749,6 +755,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_options_received mp_opt;
bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
+ struct mptcp_sock *owner;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -823,6 +830,8 @@ create_child:
ctx->setsockopt_seq = listener->setsockopt_seq;
if (ctx->mp_capable) {
+ owner = mptcp_sk(new_msk);
+
/* this can't race with mptcp_close(), as the msk is
* not yet exposted to user-space
*/
@@ -831,14 +840,14 @@ create_child:
/* record the newly created socket as the first msk
* subflow, but don't link it yet into conn_list
*/
- WRITE_ONCE(mptcp_sk(new_msk)->first, child);
+ WRITE_ONCE(owner->first, child);
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
- mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
- mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
+ mptcp_pm_new_connection(owner, child, 1);
+ mptcp_token_accept(subflow_req, owner);
ctx->conn = new_msk;
new_msk = NULL;
@@ -846,15 +855,21 @@ create_child:
* uses the correct data
*/
mptcp_copy_inaddrs(ctx->conn, child);
+ mptcp_propagate_sndbuf(ctx->conn, child);
+
+ mptcp_rcv_space_init(owner, child);
+ list_add(&ctx->node, &owner->conn_list);
+ sock_hold(child);
/* with OoO packets we can reach here without ingress
* mpc option
*/
- if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
+ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
mptcp_subflow_fully_established(ctx, &mp_opt);
+ mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
+ ctx->pm_notified = 1;
+ }
} else if (ctx->mp_join) {
- struct mptcp_sock *owner;
-
owner = subflow_req->msk;
if (!owner) {
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);