author     Florian Westphal <fw@strlen.de>        2021-02-13 02:59:56 +0300
committer  David S. Miller <davem@davemloft.net>  2021-02-13 03:31:45 +0300
commit     40947e13997a1cba4e875893ca6e5d5e61a0689d (patch)
tree       b354326e4e413936abf9600bdeb6ac01ad55645e /net
parent     a141e02e393370e082b25636401c49978b61bfcf (diff)
download   linux-40947e13997a1cba4e875893ca6e5d5e61a0689d.tar.xz
mptcp: schedule worker when subflow is closed
When the remote side closes a subflow, we should schedule the worker to
dispose of the subflow in a timely manner.
Otherwise, the SF_CLOSED event won't be generated until the mptcp
socket itself is closing or the local side is closing another subflow.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--   net/mptcp/protocol.c |  4
-rw-r--r--   net/mptcp/subflow.c  | 25
2 files changed, 27 insertions, 2 deletions
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3fd8aef979a3..267c5521692d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2170,6 +2170,10 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
 		if (inet_sk_state_load(ssk) != TCP_CLOSE)
 			continue;
 
+		/* 'subflow_data_ready' will re-sched once rx queue is empty */
+		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
+			continue;
+
 		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
 	}
 }
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 280da418d60b..36b15726f851 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -953,6 +953,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 	subflow->map_valid = 0;
 }
 
+/* sched mptcp worker to remove the subflow if no more data is pending */
+static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+{
+	struct sock *sk = (struct sock *)msk;
+
+	if (likely(ssk->sk_state != TCP_CLOSE))
+		return;
+
+	if (skb_queue_empty(&ssk->sk_receive_queue) &&
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
+		sock_hold(sk);
+		if (!schedule_work(&msk->work))
+			sock_put(sk);
+	}
+}
+
 static bool subflow_check_data_avail(struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -991,11 +1007,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	}
 
 	if (status != MAPPING_OK)
-		return false;
+		goto no_data;
 
 	skb = skb_peek(&ssk->sk_receive_queue);
 	if (WARN_ON_ONCE(!skb))
-		return false;
+		goto no_data;
 
 	/* if msk lacks the remote key, this subflow must provide an
 	 * MP_CAPABLE-based mapping
@@ -1029,6 +1045,9 @@ static bool subflow_check_data_avail(struct sock *ssk)
 	}
 	return true;
 
+no_data:
+	subflow_sched_work_if_closed(msk, ssk);
+	return false;
 fatal:
 	/* fatal protocol error, close the socket */
 	/* This barrier is coupled with smp_rmb() in tcp_poll() */
@@ -1413,6 +1432,8 @@ static void subflow_state_change(struct sock *sk)
 	if (mptcp_subflow_data_available(sk))
 		mptcp_data_ready(parent, sk);
 
+	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+
 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
 	    !subflow->rx_eof && subflow_is_done(sk)) {
 		subflow->rx_eof = 1;
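
The subtle part of subflow_sched_work_if_closed() is the reference handling
around schedule_work(): the pending work item must own a reference on the msk
socket, and if the work was already queued the extra reference has to be
dropped again right away. The following is a minimal user-space sketch of that
idiom, not part of the commit: struct msk, msk_hold()/msk_put(),
schedule_worker() and the pthread-based worker are made-up stand-ins for the
kernel's sock refcount, MPTCP_WORK_CLOSE_SUBFLOW flag and schedule_work(),
assuming C11 atomics and POSIX threads (build with -pthread).

/*
 * User-space analogue of the scheduling idiom in subflow_sched_work_if_closed():
 * take a reference before queueing deferred work, drop it again if the work
 * was already pending.  All names below are illustrative, not kernel APIs.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct msk {
	atomic_int refcnt;        /* stand-in for the sock refcount (sock_hold/sock_put) */
	atomic_bool work_pending; /* stand-in for the MPTCP_WORK_CLOSE_SUBFLOW flag bit */
};

static void msk_hold(struct msk *m)
{
	atomic_fetch_add(&m->refcnt, 1);
}

static void msk_put(struct msk *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);
}

static void *worker(void *arg)
{
	struct msk *m = arg;

	printf("worker: disposing of the closed subflow\n");
	atomic_store(&m->work_pending, false);
	msk_put(m);               /* release the reference taken at schedule time */
	return NULL;
}

/* Mirrors schedule_work(): returns false if the work item was already queued. */
static bool schedule_worker(struct msk *m)
{
	pthread_t tid;

	if (atomic_exchange(&m->work_pending, true))
		return false;
	pthread_create(&tid, NULL, worker, m);
	pthread_detach(tid);
	return true;
}

static void sched_work_if_closed(struct msk *m, bool subflow_closed, bool rxq_empty)
{
	if (!subflow_closed || !rxq_empty)
		return;             /* nothing to clean up yet */

	msk_hold(m);                /* the pending work owns this reference ... */
	if (!schedule_worker(m))
		msk_put(m);         /* ... unless someone already queued it */
}

int main(void)
{
	struct msk *m = malloc(sizeof(*m));
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	atomic_init(&m->refcnt, 1);          /* caller's reference */
	atomic_init(&m->work_pending, false);

	sched_work_if_closed(m, true, true); /* queues the worker */
	sched_work_if_closed(m, true, true); /* may find the work already pending */

	msk_put(m);           /* drop the caller's reference; the last put frees m */
	nanosleep(&ts, NULL); /* crude: give the detached worker time to finish */
	return 0;
}

The same shape appears in the patch itself: sock_hold() before
schedule_work(), and sock_put() on a false return, so exactly one reference is
held on behalf of the queued worker no matter which path raced to schedule it.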