author | Paolo Abeni <pabeni@redhat.com> | 2020-09-14 11:01:13 +0300
committer | David S. Miller <davem@davemloft.net> | 2020-09-14 23:28:02 +0300
commit | 04e4cd4f7ca4600409a898fe0abc318372c4c1ab (patch)
tree | c2ea27728fa7f2820875e10b83fddda132b6be2a /net/mptcp
parent | ab174ad8ef76276cadfdae98731d31797d265927 (diff)
download | linux-04e4cd4f7ca4600409a898fe0abc318372c4c1ab.tar.xz
mptcp: cleanup mptcp_subflow_discard_data()
There is no need to use tcp_read_sock(); we can simply
drop the skb. Additionally, try to look at the next
buffer for in-order data.

This both simplifies the code and avoids unneeded
indirect calls.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
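
For illustration, here is a minimal user-space sketch of the sequence-space
arithmetic the new helper performs (struct fake_skb, seq_before() and
discard_data() are hypothetical stand-ins, not the kernel code in the diff
below): cap the discard amount at the skb length, count a FIN as one extra
sequence unit, advance copied_seq, and drop the skb once copied_seq has
reached its end_seq.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the skb fields the helper reads; this is an
 * illustrative user-space model, not the kernel implementation.
 */
struct fake_skb {
	uint32_t len;      /* payload bytes carried by this skb */
	uint32_t end_seq;  /* sequence number just past this skb's data */
	bool fin;          /* TCPHDR_FIN set on this segment */
};

/* Wrap-safe "a is before b" comparison, like the kernel's before(). */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Models the arithmetic of the new helper: advance copied_seq by at most
 * the skb length (a FIN consumes one extra sequence unit) and report
 * whether the whole skb can now be dropped.
 */
static bool discard_data(uint32_t *copied_seq, const struct fake_skb *skb,
			 uint32_t limit)
{
	uint32_t incr = limit >= skb->len ? skb->len + skb->fin : limit;

	*copied_seq += incr;
	return !seq_before(*copied_seq, skb->end_seq);
}

int main(void)
{
	struct fake_skb skb = { .len = 1000, .end_seq = 5000, .fin = false };
	uint32_t copied_seq = 4000;

	/* Stale mapping covers the whole skb: it can be dropped in one go. */
	bool drop = discard_data(&copied_seq, &skb, 1500);

	printf("drop skb: %d, copied_seq: %u\n", drop, copied_seq);
	return 0;
}

With any C99 compiler this prints that the skb can be dropped, with
copied_seq landing exactly on end_seq.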
Diffstat (limited to 'net/mptcp')
-rw-r--r-- | net/mptcp/protocol.h |  1
-rw-r--r-- | net/mptcp/subflow.c  | 58
2 files changed, 14 insertions(+), 45 deletions(-)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 0a602acf1f3d..26f5f81f3f4c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -355,7 +355,6 @@ int mptcp_is_enabled(struct net *net);
 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
 				     struct mptcp_options_received *mp_opt);
 bool mptcp_subflow_data_available(struct sock *sk);
-int mptcp_subflow_discard_data(struct sock *sk, unsigned int limit);
 void __init mptcp_subflow_init(void);
 
 /* called with sk socket lock held */
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 0b1d73a69daf..6eb2fc0a8ebb 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -805,50 +805,22 @@ validate_seq:
 	return MAPPING_OK;
 }
 
-static int subflow_read_actor(read_descriptor_t *desc,
-			      struct sk_buff *skb,
-			      unsigned int offset, size_t len)
+static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+				       unsigned int limit)
 {
-	size_t copy_len = min(desc->count, len);
-
-	desc->count -= copy_len;
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+	u32 incr;
 
-	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
-	return copy_len;
-}
+	incr = limit >= skb->len ? skb->len + fin : limit;
 
-int mptcp_subflow_discard_data(struct sock *ssk, unsigned int limit)
-{
-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
-	u32 map_remaining;
-	size_t delta;
-
-	map_remaining = subflow->map_data_len -
-			mptcp_subflow_get_map_offset(subflow);
-	delta = min_t(size_t, limit, map_remaining);
-
-	/* discard mapped data */
-	pr_debug("discarding %zu bytes, current map len=%d", delta,
-		 map_remaining);
-	if (delta) {
-		read_descriptor_t desc = {
-			.count = delta,
-		};
-		int ret;
-
-		ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
-		if (ret < 0) {
-			ssk->sk_err = -ret;
-			return ret;
-		}
-		if (ret < delta)
-			return 0;
-		if (delta == map_remaining) {
-			subflow->data_avail = 0;
-			subflow->map_valid = 0;
-		}
-	}
-	return 0;
+	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+		 subflow->map_subflow_seq);
+	tcp_sk(ssk)->copied_seq += incr;
+	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+		sk_eat_skb(ssk, skb);
+	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
+		subflow->map_valid = 0;
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)
@@ -923,9 +895,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		/* only accept in-sequence mapping. Old values are spurious
 		 * retransmission
 		 */
-		if (mptcp_subflow_discard_data(ssk, old_ack - ack_seq))
-			goto fatal;
-		return false;
+		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
 	}
 
 	return true;