Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r-- | net/ipv4/tcp_fastopen.c | 45
1 file changed, 32 insertions, 13 deletions
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 815c85e3b1e0..ea82fd492c1b 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
+	u32 end_seq;
 
 	req->num_retrans = 0;
 	req->num_timeout = 0;
@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 
 	/* Queue the data carried in the SYN packet. We need to first
 	 * bump skb's refcnt because the caller will attempt to free it.
+	 * Note that IPv6 might also have used skb_get() trick
+	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
+	 * So we need to eventually get a clone of the packet,
+	 * before inserting it in sk_receive_queue.
 	 *
 	 * XXX (TFO) - we honor a zero-payload TFO request for now,
 	 * (any reason not to?) but no need to queue the skb since
 	 * there is no data. How about SYN+FIN?
 	 */
-	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
-		skb = skb_get(skb);
-		skb_dst_drop(skb);
-		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
-		skb_set_owner_r(skb, child);
-		__skb_queue_tail(&child->sk_receive_queue, skb);
-		tp->syn_data_acked = 1;
+	end_seq = TCP_SKB_CB(skb)->end_seq;
+	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
+		struct sk_buff *skb2;
+
+		if (unlikely(skb_shared(skb)))
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+		else
+			skb2 = skb_get(skb);
+
+		if (likely(skb2)) {
+			skb_dst_drop(skb2);
+			__skb_pull(skb2, tcp_hdrlen(skb));
+			skb_set_owner_r(skb2, child);
+			__skb_queue_tail(&child->sk_receive_queue, skb2);
+			tp->syn_data_acked = 1;
+		} else {
+			end_seq = TCP_SKB_CB(skb)->seq + 1;
+		}
 	}
-	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
 	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);
@@ -255,6 +271,9 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
 
+	if (foc->len == 0) /* Client requests a cookie */
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+
 	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 	      (syn_data || foc->len >= 0) &&
 	      tcp_fastopen_queue_check(sk))) {
@@ -265,7 +284,8 @@ bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
 		goto fastopen;
 
-	if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+	if (foc->len >= 0 && /* Client presents or requests a cookie */
+	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
 	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
 	    foc->len == valid_foc.len &&
 	    !memcmp(foc->val, valid_foc.val, foc->len)) {
@@ -284,11 +304,10 @@ fastopen:
 					LINUX_MIB_TCPFASTOPENPASSIVE);
 			return true;
 		}
-	}
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+	} else if (foc->len > 0) /* Client presents an invalid cookie */
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
-	NET_INC_STATS_BH(sock_net(sk), foc->len ?
-			 LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
-			 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 	*foc = valid_foc;
 	return false;
 }
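
The heart of the skb handling change in the second hunk is a clone-versus-refcount decision. A bare skb_get() is no longer safe once another path may already hold a reference to the SYN (IPv6 keeps one in treq->pktopts), because the subsequent __skb_pull() mutates the very skb everyone is sharing. The pattern, pulled out as a minimal standalone sketch (the helper name tfo_hold_syn_data() is hypothetical, used here only for illustration, not a kernel function):

#include <linux/skbuff.h>

/* Illustrative sketch of the ownership pattern in the hunk above.
 * If the SYN skb is shared, take a private clone so that pulling the
 * TCP header does not corrupt the copy the other holder still expects
 * intact; a sole owner only needs a refcount bump.
 */
static struct sk_buff *tfo_hold_syn_data(struct sk_buff *skb)
{
	if (unlikely(skb_shared(skb)))
		return skb_clone(skb, GFP_ATOMIC);	/* may fail under memory pressure */

	return skb_get(skb);				/* refcount bump, never fails */
}

Note the fallback when the clone fails: end_seq is reverted to seq + 1, so rcv_nxt acknowledges only the SYN and the client will retransmit the payload rather than having it silently dropped.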
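The accounting changes in the later hunks key off the value convention of tcp_fastopen_cookie.len in foc. A short classifier spelling out the three cases (the enum and function below are hypothetical, written only to make the convention explicit):

#include <linux/tcp.h>

/* Hypothetical classifier for the foc->len convention the patch's MIB
 * accounting relies on; not kernel code.
 */
enum tfo_syn_kind {
	TFO_NO_OPTION,		/* len == -1: no Fast Open option on the SYN */
	TFO_COOKIE_REQUEST,	/* len ==  0: empty option, client asks for a cookie */
	TFO_COOKIE_PRESENT,	/* len  >  0: client presented a cookie to validate */
};

static enum tfo_syn_kind tfo_classify(const struct tcp_fastopen_cookie *foc)
{
	if (foc->len < 0)
		return TFO_NO_OPTION;
	return foc->len ? TFO_COOKIE_PRESENT : TFO_COOKIE_REQUEST;
}

With that split, TCPFASTOPENCOOKIEREQD is counted exactly once per cookie request at function entry, and TCPFASTOPENPASSIVEFAIL now fires both when a Fast Open attempt fails to create the child socket and when the client presents an invalid cookie, instead of being inferred from foc->len at a single exit point.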