author     Geliang Tang <geliang.tang@suse.com>  2022-04-27 00:57:15 +0300
committer  David S. Miller <davem@davemloft.net>  2022-04-27 12:45:54 +0300
commit     49fa1919d6bcdcf3cf3d080c1943f537f6ed5e70 (patch)
tree       9763e45a6fa1bd778b2bdff78d32de45a5402345
parent     9c81be0dbc89ccb76ce34c3a88425bf3a0d57ebb (diff)
download   linux-49fa1919d6bcdcf3cf3d080c1943f537f6ed5e70.tar.xz
mptcp: reset subflow when MP_FAIL doesn't respond
This patch adds a new msk->flags bit MPTCP_FAIL_NO_RESPONSE, then reuses sk_timer to trigger a check if we have not received a response from the peer after sending MP_FAIL. If the peer doesn't respond properly, reset the subflow.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/mptcp/pm.c        8
-rw-r--r--  net/mptcp/protocol.c  48
-rw-r--r--  net/mptcp/protocol.h  1
-rw-r--r--  net/mptcp/subflow.c   11
4 files changed, 68 insertions, 0 deletions
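A note on the control flow the diff below implements: subflow.c arms the MPTCP socket's sk_timer for TCP_RTO_MAX when MP_FAIL has been sent and a response is expected; pm.c stops the timer when the peer's MP_FAIL echo arrives, and subflow.c stops it when an infinite mapping arrives; if the timer fires first, mptcp_timeout_timer() sets MPTCP_FAIL_NO_RESPONSE and schedules the worker, which resets the offending subflow in mptcp_mp_fail_no_response(). The user-space C sketch below only models that timeout-or-response race for illustration; the names fail_state, arm_fail_timer(), fail_response_received() and fail_timer_expired() are hypothetical and are not kernel APIs.

/* Illustrative user-space model of the MP_FAIL timeout logic above.
 * These names are made up for the example; the kernel patch uses
 * msk->sk_timer, MPTCP_FAIL_NO_RESPONSE and the MPTCP worker instead.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct fail_state {
        bool response_expected;   /* MP_FAIL sent, waiting on the peer */
        bool response_received;   /* peer echoed MP_FAIL / infinite map */
        time_t deadline;          /* analogue of the sk_timer expiry */
};

/* Arm the timeout when MP_FAIL is sent (sk_reset_timer analogue). */
static void arm_fail_timer(struct fail_state *st, int timeout_sec)
{
        st->response_expected = true;
        st->response_received = false;
        st->deadline = time(NULL) + timeout_sec;
}

/* Peer answered in time: stop waiting (sk_stop_timer analogue). */
static void fail_response_received(struct fail_state *st)
{
        st->response_received = true;
        st->response_expected = false;
}

/* Timer check (mptcp_timeout_timer + worker analogue):
 * true means "no response, reset the subflow".
 */
static bool fail_timer_expired(const struct fail_state *st)
{
        return st->response_expected && !st->response_received &&
               time(NULL) >= st->deadline;
}

int main(void)
{
        struct fail_state st = { 0 };

        arm_fail_timer(&st, 1);   /* stand-in for TCP_RTO_MAX */
        /* No fail_response_received() call: simulate a silent peer. */
        while (!fail_timer_expired(&st))
                sleep(1);
        printf("MP_FAIL not answered, resetting subflow\n");
        return 0;
}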
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 971e843a304c..14f448d82bb2 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -287,6 +287,7 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+        struct sock *s = (struct sock *)msk;

         pr_debug("fail_seq=%llu", fail_seq);
@@ -299,6 +300,13 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
                 subflow->send_mp_fail = 1;
                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
                 subflow->send_infinite_map = 1;
+        } else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+                pr_debug("MP_FAIL response received");
+
+                mptcp_data_lock(s);
+                if (inet_sk_state_load(s) != TCP_CLOSE)
+                        sk_stop_timer(s, &s->sk_timer);
+                mptcp_data_unlock(s);
         }
 }
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index ea74122065f1..a5d466e6b538 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2169,10 +2169,38 @@ static void mptcp_retransmit_timer(struct timer_list *t)
         sock_put(sk);
 }

+static struct mptcp_subflow_context *
+mp_fail_response_expect_subflow(struct mptcp_sock *msk)
+{
+        struct mptcp_subflow_context *subflow, *ret = NULL;
+
+        mptcp_for_each_subflow(msk, subflow) {
+                if (READ_ONCE(subflow->mp_fail_response_expect)) {
+                        ret = subflow;
+                        break;
+                }
+        }
+
+        return ret;
+}
+
+static void mptcp_check_mp_fail_response(struct mptcp_sock *msk)
+{
+        struct mptcp_subflow_context *subflow;
+        struct sock *sk = (struct sock *)msk;
+
+        bh_lock_sock(sk);
+        subflow = mp_fail_response_expect_subflow(msk);
+        if (subflow)
+                __set_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags);
+        bh_unlock_sock(sk);
+}
+
 static void mptcp_timeout_timer(struct timer_list *t)
 {
         struct sock *sk = from_timer(sk, t, sk_timer);

+        mptcp_check_mp_fail_response(mptcp_sk(sk));
         mptcp_schedule_work(sk);
         sock_put(sk);
 }
@@ -2499,6 +2527,23 @@ reset_timer:
         mptcp_data_unlock(sk);
 }

+static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+{
+        struct mptcp_subflow_context *subflow;
+        struct sock *ssk;
+        bool slow;
+
+        subflow = mp_fail_response_expect_subflow(msk);
+        if (subflow) {
+                pr_debug("MP_FAIL doesn't respond, reset the subflow");
+
+                ssk = mptcp_subflow_tcp_sock(subflow);
+                slow = lock_sock_fast(ssk);
+                mptcp_subflow_reset(ssk);
+                unlock_sock_fast(ssk, slow);
+        }
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
         struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -2539,6 +2584,9 @@
         if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
                 __mptcp_retrans(sk);

+        if (test_and_clear_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags))
+                mptcp_mp_fail_no_response(msk);
+
 unlock:
         release_sock(sk);
         sock_put(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index cc66c81a8fab..3a8740fef918 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -116,6 +116,7 @@
 #define MPTCP_WORK_EOF           3
 #define MPTCP_FALLBACK_DONE      4
 #define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_FAIL_NO_RESPONSE   6

 /* MPTCP socket release cb flags */
 #define MPTCP_PUSH_PENDING       1
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index ca2352ad20d4..75c824b67ca9 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -968,6 +968,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 {
         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
         bool csum_reqd = READ_ONCE(msk->csum_enabled);
+        struct sock *sk = (struct sock *)msk;
         struct mptcp_ext *mpext;
         struct sk_buff *skb;
         u16 data_len;
@@ -1009,6 +1010,12 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                 pr_debug("infinite mapping received");
                 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                 subflow->map_data_len = 0;
+                if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
+                        mptcp_data_lock(sk);
+                        if (inet_sk_state_load(sk) != TCP_CLOSE)
+                                sk_stop_timer(sk, &sk->sk_timer);
+                        mptcp_data_unlock(sk);
+                }
                 return MAPPING_INVALID;
         }
@@ -1219,6 +1226,10 @@ fallback:
                                         sk_eat_skb(ssk, skb);
                         } else {
                                 WRITE_ONCE(subflow->mp_fail_response_expect, true);
+                                /* The data lock is acquired in __mptcp_move_skbs() */
+                                sk_reset_timer((struct sock *)msk,
+                                               &((struct sock *)msk)->sk_timer,
+                                               jiffies + TCP_RTO_MAX);
                         }
                         WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                         return true;