Diffstat (limited to 'net/mptcp/subflow.c')
-rw-r--r--	net/mptcp/subflow.c	69
1 files changed, 62 insertions, 7 deletions
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index df3192305967..89b91bc7a831 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -4,6 +4,8 @@
  * Copyright (c) 2017 - 2019, Intel Corporation.
  */
 
+#define pr_fmt(fmt) "MPTCP: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -18,6 +20,33 @@
 #include <net/mptcp.h>
 #include "protocol.h"
 
+static int subflow_rebuild_header(struct sock *sk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	int err = 0;
+
+	if (subflow->request_mptcp && !subflow->token) {
+		pr_debug("subflow=%p", sk);
+		err = mptcp_token_new_connect(sk);
+	}
+
+	if (err)
+		return err;
+
+	return subflow->icsk_af_ops->rebuild_header(sk);
+}
+
+static void subflow_req_destructor(struct request_sock *req)
+{
+	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+
+	pr_debug("subflow_req=%p", subflow_req);
+
+	if (subflow_req->mp_capable)
+		mptcp_token_destroy_request(subflow_req->token);
+	tcp_request_sock_ops.destructor(req);
+}
+
 static void subflow_init_req(struct request_sock *req,
 			     const struct sock *sk_listener,
 			     struct sk_buff *skb)
@@ -42,7 +71,12 @@ static void subflow_init_req(struct request_sock *req,
 #endif
 
 	if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
-		subflow_req->mp_capable = 1;
+		int err;
+
+		err = mptcp_token_new_request(req);
+		if (err == 0)
+			subflow_req->mp_capable = 1;
+
 		subflow_req->remote_key = rx_opt.mptcp.sndr_key;
 	}
 }
@@ -150,16 +184,28 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 						     req_unhash, own_req);
 
 	if (child && *own_req) {
-		if (!mptcp_subflow_ctx(child)) {
-			pr_debug("Closing child socket");
-			inet_sk_set_state(child, TCP_CLOSE);
-			sock_set_flag(child, SOCK_DEAD);
-			inet_csk_destroy_sock(child);
-			child = NULL;
+		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
+
+		/* we have null ctx on TCP fallback, not fatal on MPC
+		 * handshake
+		 */
+		if (!ctx)
+			return child;
+
+		if (ctx->mp_capable) {
+			if (mptcp_token_new_accept(ctx->token))
+				goto close_child;
 		}
 	}
 
 	return child;
+
+close_child:
+	pr_debug("closing child socket");
+	tcp_send_active_reset(child, GFP_ATOMIC);
+	inet_csk_prepare_forced_close(child);
+	tcp_done(child);
+	return NULL;
 }
 
 static struct inet_connection_sock_af_ops subflow_specific;
@@ -224,6 +270,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
 	pr_debug("subflow=%p", subflow);
 
 	*new_sock = sf;
+	sock_hold(sk);
 	subflow->conn = sk;
 
 	return 0;
@@ -286,6 +333,9 @@ static void subflow_ulp_release(struct sock *sk)
 	if (!ctx)
 		return;
 
+	if (ctx->conn)
+		sock_put(ctx->conn);
+
 	kfree_rcu(ctx, rcu);
 }
 
@@ -323,6 +373,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
 	new_ctx->fourth_ack = 1;
 	new_ctx->remote_key = subflow_req->remote_key;
 	new_ctx->local_key = subflow_req->local_key;
+	new_ctx->token = subflow_req->token;
 }
 
 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
@@ -346,6 +397,8 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
 	if (!subflow_ops->slab)
 		return -ENOMEM;
 
+	subflow_ops->destructor = subflow_req_destructor;
+
 	return 0;
 }
 
@@ -362,6 +415,7 @@ void mptcp_subflow_init(void)
 	subflow_specific.conn_request = subflow_v4_conn_request;
 	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
 	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
+	subflow_specific.rebuild_header = subflow_rebuild_header;
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
@@ -371,6 +425,7 @@ void mptcp_subflow_init(void)
 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
 	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
 	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
+	subflow_v6_specific.rebuild_header = subflow_rebuild_header;
 
 	subflow_v6m_specific = subflow_v6_specific;
 	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
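
Note: the sock_hold()/sock_put() pair added above pins the parent MPTCP socket for as long as the subflow context points at it via ctx->conn. The userspace C program below is only an illustrative analogue of that lifetime rule, not kernel code; every name in it (conn, subflow_ctx, conn_hold, conn_put, subflow_create, subflow_release) is hypothetical.

/* Hypothetical userspace analogue of the ctx->conn lifetime rule:
 * take a reference when the context starts pointing at the parent
 * (the +sock_hold(sk) in mptcp_subflow_create_socket()), drop it when
 * the context is released (the +sock_put(ctx->conn) in
 * subflow_ulp_release()).
 */
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int refcnt;			/* stands in for the socket refcount */
};

struct subflow_ctx {
	struct conn *conn;		/* parallels mptcp_subflow_context->conn */
};

static void conn_hold(struct conn *c)	/* ~ sock_hold() */
{
	c->refcnt++;
}

static void conn_put(struct conn *c)	/* ~ sock_put() */
{
	if (--c->refcnt == 0)
		free(c);
}

/* parallels mptcp_subflow_create_socket(): hold before publishing the pointer */
static struct subflow_ctx *subflow_create(struct conn *parent)
{
	struct subflow_ctx *ctx = calloc(1, sizeof(*ctx));

	conn_hold(parent);
	ctx->conn = parent;
	return ctx;
}

/* parallels subflow_ulp_release(): drop the held reference before freeing */
static void subflow_release(struct subflow_ctx *ctx)
{
	if (ctx->conn)
		conn_put(ctx->conn);
	free(ctx);
}

int main(void)
{
	struct conn *parent = calloc(1, sizeof(*parent));

	conn_hold(parent);			/* creator's own reference */

	struct subflow_ctx *ctx = subflow_create(parent);

	subflow_release(ctx);			/* context drops its reference */
	conn_put(parent);			/* last put frees the parent */
	printf("references balanced\n");
	return 0;
}

The point of the pairing is that the subflow context may outlive the caller that set ctx->conn, so the parent socket must stay valid until the context itself is released.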