From f3a7c66b5ce0b75a9774a50b5dcce93e5ba28370 Mon Sep 17 00:00:00 2001
From: Harvey Harrison
Date: Sat, 14 Feb 2009 22:58:35 -0800
Subject: net: replace __constant_{endian} uses in net headers

Base versions handle constant folding now.  For headers exposed to
userspace, we must only expose the __ prefixed versions.

Signed-off-by: Harvey Harrison
Signed-off-by: David S. Miller
---
 include/linux/tcp.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'include/linux/tcp.h')

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fe77e1499ab7..0cd99e6baca5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@ union tcp_word_hdr {
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
 
 enum {
-	TCP_FLAG_CWR = __constant_htonl(0x00800000),
-	TCP_FLAG_ECE = __constant_htonl(0x00400000),
-	TCP_FLAG_URG = __constant_htonl(0x00200000),
-	TCP_FLAG_ACK = __constant_htonl(0x00100000),
-	TCP_FLAG_PSH = __constant_htonl(0x00080000),
-	TCP_FLAG_RST = __constant_htonl(0x00040000),
-	TCP_FLAG_SYN = __constant_htonl(0x00020000),
-	TCP_FLAG_FIN = __constant_htonl(0x00010000),
-	TCP_RESERVED_BITS = __constant_htonl(0x0F000000),
-	TCP_DATA_OFFSET = __constant_htonl(0xF0000000)
+	TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
+	TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
+	TCP_FLAG_URG = __cpu_to_be32(0x00200000),
+	TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
+	TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
+	TCP_FLAG_RST = __cpu_to_be32(0x00040000),
+	TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
+	TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
+	TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
+	TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
 };
 
 /* TCP socket options */
-- cgit v1.2.3
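The point of __cpu_to_be32() (like __constant_htonl() before it) is that
it folds to a plain integer constant at compile time, so testing flags
against the raw network-byte-order word costs no runtime byte swap. A
minimal userspace sketch of the same idea -- htonl() on a literal also
folds under optimization, and is_synack() is a hypothetical helper, not
kernel API:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Userspace stand-ins for the constants above; htonl() on a literal
 * folds at compile time just as __cpu_to_be32() does. */
#define FLAG_SYN htonl(0x00020000)
#define FLAG_ACK htonl(0x00100000)

/* Test the 4th 32-bit word of a TCP header without swapping it. */
static int is_synack(uint32_t flag_word)
{
	return (flag_word & (FLAG_SYN | FLAG_ACK)) ==
	       (FLAG_SYN | FLAG_ACK);
}

int main(void)
{
	uint32_t word = htonl(0x00120000); /* made-up sample: SYN|ACK set */

	printf("SYN/ACK: %d\n", is_synack(word));
	return 0;
}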
From cabeccbd172cc305f4383f5a4808ae254745275f Mon Sep 17 00:00:00 2001
From: Ilpo Järvinen
Date: Sat, 28 Feb 2009 04:44:38 +0000
Subject: tcp: kill eff_sacks "cache", the sole user can calculate itself
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Also fixes an insignificant bug that would cause the sending of a
stale SACK block (it would occur in some corner cases).

Signed-off-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---
 include/linux/tcp.h      |  1 -
 include/net/tcp.h        |  1 -
 net/ipv4/tcp_input.c     | 15 ++-------------
 net/ipv4/tcp_minisocks.c |  3 +--
 net/ipv4/tcp_output.c    | 12 ++++++------
 5 files changed, 9 insertions(+), 23 deletions(-)

(limited to 'include/linux/tcp.h')

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 0cd99e6baca5..4b86ad71e054 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -218,7 +218,6 @@ struct tcp_options_received {
 		snd_wscale : 4,	/* Window scaling received from sender	*/
 		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
 /*	SACKs data	*/
-	u8	eff_sacks;	/* Size of SACK array to send with next packet */
 	u8	num_sacks;	/* Number of SACK blocks		*/
 	u16	user_mss;	/* mss requested by user in ioctl	*/
 	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0366a559afec..055e4946d4c8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -926,7 +926,6 @@ extern void tcp_done(struct sock *sk);
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
-	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 03f5ede87224..e4442a293eb0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4099,7 +4099,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
 
@@ -4154,8 +4153,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -4218,7 +4215,6 @@ new_sack:
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4232,7 +4228,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
 	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
 	}
 
@@ -4253,11 +4248,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 		this_sack++;
 		sp++;
 	}
-	if (num_sacks != tp->rx_opt.num_sacks) {
+	if (num_sacks != tp->rx_opt.num_sacks)
 		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-				       tp->rx_opt.dsack;
-	}
 }
 
 /* This one checks to see if we can put data from the
@@ -4333,10 +4325,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_accept_cwr(tp, skb);
 
-	if (tp->rx_opt.dsack) {
+	if (tp->rx_opt.dsack)
 		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-	}
 
 	/* Queue data for delivery to the user.
 	 * Packets in sequence go to the receive queue.
@@ -4456,7 +4446,6 @@ drop:
 
 	if (tcp_is_sack(tp)) {
 		tp->rx_opt.num_sacks = 1;
 		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = 1;
 		tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
 		tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f67effbb102b..bb3d8b35f19a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -434,9 +434,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.saw_tstamp = 0;
 
 		newtp->rx_opt.dsack = 0;
-		newtp->rx_opt.eff_sacks = 0;
-
 		newtp->rx_opt.num_sacks = 0;
+
 		newtp->urg_data = 0;
 
 		if (sock_flag(newsk, SOCK_KEEPOPEN))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 61445b57610c..1555bb73b638 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -441,10 +441,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 			*ptr++ = htonl(sp[this_sack].end_seq);
 		}
 
-		if (tp->rx_opt.dsack) {
+		if (tp->rx_opt.dsack)
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-		}
 	}
 }
 
@@ -550,6 +548,7 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
+	unsigned int eff_sacks;
 
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -568,10 +567,11 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
-	if (unlikely(tp->rx_opt.eff_sacks)) {
+	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+	if (unlikely(eff_sacks)) {
 		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, tp->rx_opt.eff_sacks,
+			min_t(unsigned, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -1418,7 +1418,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	    icsk->icsk_mtup.probe_size ||
 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
 	    tp->snd_cwnd < 11 ||
-	    tp->rx_opt.eff_sacks)
+	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
 		return -1;
 
 	/* Very simple search strategy: just double the MSS. */
-- cgit v1.2.3
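The derivation this patch moves into tcp_established_options() is small
enough to show on its own: the effective SACK count is num_sacks + dsack,
computed right where it is consumed, then clamped by whatever option
space remains. A rough userspace sketch using the stock values of the
option-space macros (sack_blocks_to_send() is an illustrative stand-in,
not the kernel function):

#include <stdio.h>

/* Assumed values, mirroring the kernel's TCP option accounting. */
#define MAX_TCP_OPTION_SPACE      40 /* bytes of TCP option space */
#define TCPOLEN_SACK_BASE_ALIGNED  4
#define TCPOLEN_SACK_PERBLOCK      8

static unsigned int sack_blocks_to_send(unsigned int num_sacks,
					unsigned int dsack,
					unsigned int size_used)
{
	/* What tp->rx_opt.eff_sacks used to cache, derived on demand. */
	unsigned int eff_sacks = num_sacks + dsack;
	unsigned int remaining = MAX_TCP_OPTION_SPACE - size_used;
	unsigned int fit = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			   TCPOLEN_SACK_PERBLOCK;

	return eff_sacks < fit ? eff_sacks : fit;
}

int main(void)
{
	/* 3 SACK blocks plus a D-SACK, 12 bytes already used: only 3 fit. */
	printf("%u\n", sack_blocks_to_send(3, 1, 12));
	return 0;
}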
From 0c54b85f2828128274f319a1eb3ce7f604fe2a53 Mon Sep 17 00:00:00 2001
From: Ilpo Järvinen
Date: Sat, 14 Mar 2009 14:23:05 +0000
Subject: tcp: simplify tcp_current_mss
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There's very little need for most of the callsites to get
tp->xmit_size_goal updated.  As is, that costs us a divide, so slice
the function in two.  Also, the only users of tp->xmit_size_goal are
directly behind tcp_current_mss(), so there's no need to store that
variable in tcp_sock at all!  Dropping xmit_size_goal currently leaves
a 16-bit hole, and some reorganization would again be necessary to
change that (but I'm aiming to fill that hole with a u16
xmit_size_goal_segs that caches the result of the remaining divide, to
win back the TSO regression).

Bring the xmit_size_goal parts into tcp.c.

Signed-off-by: Ilpo Järvinen
Cc: Evgeniy Polyakov
Cc: Ingo Molnar
Signed-off-by: David S. Miller
---
 include/linux/tcp.h   |  1 -
 include/net/tcp.h     | 13 +++++++++++--
 net/ipv4/tcp.c        | 43 +++++++++++++++++++++++++++++++++++--------
 net/ipv4/tcp_input.c  |  2 +-
 net/ipv4/tcp_output.c | 41 +++++++----------------------------------
 5 files changed, 54 insertions(+), 46 deletions(-)

(limited to 'include/linux/tcp.h')

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4b86ad71e054..ad2021ccc55a 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,7 +248,6 @@ struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
 	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
-	u16	xmit_size_goal;	/* Goal for segmenting output packets	*/
 
 /*
  *	Header prediction flags
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 255ca35bea05..e54c76d75495 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -481,7 +481,16 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
 }
 
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk, int large);
+extern unsigned int tcp_current_mss(struct sock *sk);
+
+/* Bound MSS / TSO packet size with the half of the window */
+static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
+{
+	if (tp->max_window && pktsize > (tp->max_window >> 1))
+		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+	else
+		return pktsize;
+}
 
 /* tcp.c */
 extern void tcp_get_info(struct sock *, struct tcp_info *);
@@ -822,7 +831,7 @@ static inline void tcp_push_pending_frames(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
+	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d3f9beee74c0..886596ff0aae 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -661,6 +661,37 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	return NULL;
 }
 
+static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+				       int large_allowed)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 xmit_size_goal;
+
+	xmit_size_goal = mss_now;
+
+	if (large_allowed && sk_can_gso(sk)) {
+		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
+				  inet_csk(sk)->icsk_af_ops->net_header_len -
+				  inet_csk(sk)->icsk_ext_hdr_len -
+				  tp->tcp_header_len);
+
+		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
+		xmit_size_goal -= (xmit_size_goal % mss_now);
+	}
+
+	return xmit_size_goal;
+}
+
+static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+{
+	int mss_now;
+
+	mss_now = tcp_current_mss(sk);
+	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+
+	return mss_now;
+}
+
 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
 			 size_t psize, int flags)
 {
@@ -677,8 +708,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-	size_goal = tp->xmit_size_goal;
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
 	copied = 0;
 
 	err = -EPIPE;
@@ -761,8 +791,7 @@ wait_for_memory:
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
 
-		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-		size_goal = tp->xmit_size_goal;
+		mss_now = tcp_send_mss(sk, &size_goal, flags);
 	}
 
 out:
@@ -844,8 +873,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	/* This should be in poll */
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-	size_goal = tp->xmit_size_goal;
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
 
 	/* Ok commence sending. */
 	iovlen = msg->msg_iovlen;
@@ -1007,8 +1035,7 @@ wait_for_memory:
 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 				goto do_error;
 
-			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-			size_goal = tp->xmit_size_goal;
+			mss_now = tcp_send_mss(sk, &size_goal, flags);
 		}
 	}
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 311c30f73ee4..fae78e3eccc4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2864,7 +2864,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	unsigned int mss = tcp_current_mss(sk, 0);
+	unsigned int mss = tcp_current_mss(sk);
 	u32 prior_lost = tp->lost_out;
 
 	tcp_for_write_queue(skb, sk) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 325658039139..c1f259d2d33b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -921,7 +921,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
+		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
 
 	return 0;
 }
@@ -982,15 +982,6 @@ void tcp_mtup_init(struct sock *sk)
 	icsk->icsk_mtup.probe_size = 0;
 }
 
-/* Bound MSS / TSO packet size with the half of the window */
-static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
-{
-	if (tp->max_window && pktsize > (tp->max_window >> 1))
-		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
-	else
-		return pktsize;
-}
-
 /* This function synchronize snd mss to current pmtu/exthdr set.
 
    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
@@ -1037,22 +1028,17 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
  */
-unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
+unsigned int tcp_current_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
-	u16 xmit_size_goal;
-	int doing_tso = 0;
 	unsigned header_len;
 	struct tcp_out_options opts;
 	struct tcp_md5sig_key *md5;
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk))
-		doing_tso = 1;
-
 	if (dst) {
 		u32 mtu = dst_mtu(dst);
 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
@@ -1070,19 +1056,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 		mss_now -= delta;
 	}
 
-	xmit_size_goal = mss_now;
-
-	if (doing_tso) {
-		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
-				  inet_csk(sk)->icsk_af_ops->net_header_len -
-				  inet_csk(sk)->icsk_ext_hdr_len -
-				  tp->tcp_header_len);
-
-		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
-	}
-	tp->xmit_size_goal = xmit_size_goal;
-
 	return mss_now;
 }
 
@@ -1264,7 +1237,7 @@ int tcp_may_send_now(struct sock *sk)
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return (skb &&
-		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk),
 			     (tcp_skb_is_last(sk, skb) ?
 			      tp->nonagle : TCP_NAGLE_PUSH)));
 }
@@ -1421,7 +1394,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return -1;
 
 	/* Very simple search strategy: just double the MSS. */
-	mss_now = tcp_current_mss(sk, 0);
+	mss_now = tcp_current_mss(sk);
 	probe_size = 2 * tp->mss_cache;
 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
@@ -1903,7 +1876,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
 		return -EHOSTUNREACH; /* Routing failure or similar. */
 
-	cur_mss = tcp_current_mss(sk, 0);
+	cur_mss = tcp_current_mss(sk);
 
 	/* If receiver has shrunk his window, and skb is out of
 	 * new window, do not retransmit it. The exception is the
@@ -2111,7 +2084,7 @@ void tcp_send_fin(struct sock *sk)
 	 * unsent frames.  But be careful about outgoing SACKS
 	 * and IP options.
 	 */
-	mss_now = tcp_current_mss(sk, 1);
+	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
@@ -2523,7 +2496,7 @@ int tcp_write_wakeup(struct sock *sk)
 	if ((skb = tcp_send_head(sk)) != NULL &&
 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
 		int err;
-		unsigned int mss = tcp_current_mss(sk, 0);
+		unsigned int mss = tcp_current_mss(sk);
 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
-- cgit v1.2.3
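To see what the new tcp_xmit_size_goal() computes, here is a rough
userspace rendering of the same arithmetic: start from the GSO budget,
bound it by half of the peer's largest advertised window, then round
down to a whole number of MSS units. The function and parameter names
are local stand-ins for the tcp_sock fields, and the figures in main()
are made up:

#include <stdio.h>

/* Mirror of tcp_bound_to_half_wnd(); 68 is the minimal IPv4 MTU. */
static unsigned int bound_to_half_wnd(unsigned int max_window,
				      unsigned int tcp_header_len,
				      unsigned int pktsize)
{
	if (max_window && pktsize > (max_window >> 1)) {
		unsigned int half = max_window >> 1;
		unsigned int floor = 68U - tcp_header_len;

		return half > floor ? half : floor;
	}
	return pktsize;
}

static unsigned int xmit_size_goal(unsigned int mss_now, unsigned int budget,
				   unsigned int max_window,
				   unsigned int tcp_header_len)
{
	unsigned int goal = bound_to_half_wnd(max_window, tcp_header_len,
					      budget);

	return goal - (goal % mss_now); /* whole MSS units only */
}

int main(void)
{
	/* 1448-byte MSS, ~64KB GSO budget, 40KB max window -> 18824. */
	printf("%u\n", xmit_size_goal(1448, 64000, 40000, 32));
	return 0;
}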
From 2a3a041c4e2c1685e668b280c121a5a40a029a03 Mon Sep 17 00:00:00 2001
From: Ilpo Järvinen
Date: Sat, 14 Mar 2009 22:45:16 +0000
Subject: tcp: cache result of earlier divides when mss-aligning things
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The result is very unlikely to change every so often, so we hardly
need to divide again after doing that once for a connection.  Yet, if
a divide still becomes necessary, we detect that and do the right
thing and again settle for the non-divide state.

Takes the u16 space which was previously taken by the plain
xmit_size_goal.  This should take care of part of the tso vs non-tso
difference we found earlier.

Signed-off-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---
 include/linux/tcp.h |  1 +
 net/ipv4/tcp.c      | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

(limited to 'include/linux/tcp.h')

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ad2021ccc55a..9d5078bd23a3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,6 +248,7 @@ struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
 	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
+	u16	xmit_size_goal_segs; /* Goal for segmenting output packets */
 
 /*
  *	Header prediction flags
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 886596ff0aae..0db9f3b984f7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -665,7 +665,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 xmit_size_goal;
+	u32 xmit_size_goal, old_size_goal;
 
 	xmit_size_goal = mss_now;
 
@@ -676,7 +676,17 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				  tp->tcp_header_len);
 
 		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
+
+		/* We try hard to avoid divides here */
+		old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+		if (likely(old_size_goal <= xmit_size_goal &&
+			   old_size_goal + mss_now > xmit_size_goal)) {
+			xmit_size_goal = old_size_goal;
+		} else {
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+		}
 	}
 
 	return xmit_size_goal;
-- cgit v1.2.3
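The caching scheme above is compact enough to demonstrate standalone:
remember the segment count from the last divide and divide again only
when the cached count no longer brackets the new goal. A sketch with
stand-in names for the tcp_sock fields:

#include <stdio.h>

struct goal_cache {
	unsigned short size_goal_segs; /* cached result of goal / mss */
};

static unsigned int size_goal(struct goal_cache *c, unsigned int mss_now,
			      unsigned int raw_goal)
{
	unsigned int old_goal = c->size_goal_segs * mss_now;

	/* Fast path: the cached segment count still fits, no divide. */
	if (old_goal <= raw_goal && old_goal + mss_now > raw_goal)
		return old_goal;

	/* Slow path: divide once, cache, settle back into the fast path. */
	c->size_goal_segs = raw_goal / mss_now;
	return c->size_goal_segs * mss_now;
}

int main(void)
{
	struct goal_cache c = { 0 };

	printf("%u\n", size_goal(&c, 1448, 18900)); /* divides: 13 * 1448 */
	printf("%u\n", size_goal(&c, 1448, 18900)); /* cached, no divide  */
	return 0;
}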