Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	22	++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 734cfc8ff76e..45f750e85714 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 	u32 new_sample = tp->rcv_rtt_est.rtt_us;
 	long m = sample;
 
-	if (m == 0)
-		m = 1;
-
 	if (new_sample != 0) {
 		/* If we sample in larger samples in the non-timestamp
 		 * case, we could grossly overestimate the RTT especially
@@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
 	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
+	if (!delta_us)
+		delta_us = 1;
 	tcp_rcv_rtt_update(tp, delta_us, 1);
 
 new_measure:
@@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 	    (TCP_SKB_CB(skb)->end_seq -
 	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
 		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+		u32 delta_us;
 
+		if (!delta)
+			delta = 1;
+		delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
 		tcp_rcv_rtt_update(tp, delta_us, 0);
 	}
 }
@@ -579,6 +581,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	int time;
 	int copied;
 
+	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
 	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
 		return;
@@ -1941,6 +1944,8 @@ void tcp_enter_loss(struct sock *sk)
 	if (is_reneg) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
 	}
 
 	tcp_clear_all_retrans_hints(tp);
@@ -2326,6 +2331,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 	}
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 	tp->undo_marker = 0;
+	tp->rack.advanced = 1; /* Force RACK to re-exam losses */
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2370,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
+	tp->is_sack_reneg = 0;
 	return false;
 }
 
@@ -2397,8 +2404,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
-		if (frto_undo || tcp_is_sack(tp))
+		if (frto_undo || tcp_is_sack(tp)) {
 			tcp_set_ca_state(sk, TCP_CA_Open);
+			tp->is_sack_reneg = 0;
+		}
 		return true;
 	}
 	return false;
@@ -3495,6 +3504,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	struct tcp_sacktag_state sack_state;
 	struct rate_sample rs = { .prior_delivered = 0 };
 	u32 prior_snd_una = tp->snd_una;
+	bool is_sack_reneg = tp->is_sack_reneg;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
@@ -3611,7 +3621,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
-	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
 	tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
 	tcp_xmit_recovery(sk, rexmit);
 	return 1;
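
Note: the first three hunks move the zero-sample clamp out of tcp_rcv_rtt_update() and into its callers. The placement matters in the timestamp path, where the sample is measured in TCP_TS_HZ ticks and only then scaled to microseconds: clamping the tick count before the multiply yields a minimum sample of one full tick, whereas the old in-filter clamp turned a zero sample into just 1 us. Below is a minimal userspace sketch of the difference, assuming TCP_TS_HZ is 1000 as in current kernels; ts_delta_to_us() is a hypothetical helper for illustration, not a kernel function.

#include <stdio.h>

#define USEC_PER_SEC 1000000u
#define TCP_TS_HZ    1000u	/* assumed timestamp clock resolution */

/* Hypothetical helper mirroring the patched tcp_rcv_rtt_measure_ts()
 * order of operations: clamp in tick units first, then convert to us.
 */
static unsigned int ts_delta_to_us(unsigned int delta_ticks)
{
	if (!delta_ticks)
		delta_ticks = 1;
	return delta_ticks * (USEC_PER_SEC / TCP_TS_HZ);
}

int main(void)
{
	/* A sub-tick RTT: the raw timestamp echo delta is 0 ticks. */
	unsigned int raw = 0;

	/* Old scheme: convert first (0 us), then clamp inside the filter. */
	unsigned int old_us = raw * (USEC_PER_SEC / TCP_TS_HZ);
	if (old_us == 0)
		old_us = 1;

	/* New scheme: clamp the tick count first, giving one full tick. */
	unsigned int new_us = ts_delta_to_us(raw);

	printf("old: %u us, new: %u us\n", old_us, new_us);	/* old: 1, new: 1000 */
	return 0;
}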

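Note: the remaining hunks thread the new tp->is_sack_reneg flag through loss handling: tcp_enter_loss() sets it when the peer renegs on previously SACKed data, the undo paths clear it once the connection returns to TCP_CA_Open, and tcp_ack() latches its value before processing the ACK so it can be passed to tcp_rate_gen(). The tcp_rate_gen() side is not part of this diff; presumably it uses the flag to invalidate rate samples taken while reneging is in effect. A simplified userspace model of that lifecycle follows; every name in it is a stand-in for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	bool is_sack_reneg;		/* mirrors tp->is_sack_reneg */
};

static void enter_loss(struct sock_model *s, bool is_reneg)
{
	if (is_reneg)
		s->is_sack_reneg = true;	/* set until this loss event is over */
}

static void undo_recovery(struct sock_model *s)
{
	s->is_sack_reneg = false;	/* back to TCP_CA_Open: flag cleared */
}

/* Stand-in for the tcp_rate_gen() call site; only the flag plumbing
 * is taken from the diff, the invalidation policy is assumed.
 */
static void rate_gen(unsigned int delivered, bool is_sack_reneg)
{
	if (is_sack_reneg)
		printf("sample (%u pkts) invalidated: SACK reneging\n", delivered);
	else
		printf("sample (%u pkts) usable\n", delivered);
}

int main(void)
{
	struct sock_model s = { .is_sack_reneg = false };

	enter_loss(&s, true);		/* peer renegs on SACKed data */
	rate_gen(10, s.is_sack_reneg);	/* flagged while reneging persists */
	undo_recovery(&s);
	rate_gen(10, s.is_sack_reneg);	/* clean sample again */
	return 0;
}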