author    David S. Miller <davem@davemloft.net>  2017-01-14 06:37:18 +0300
committer David S. Miller <davem@davemloft.net>  2017-01-14 06:37:18 +0300
commit    718e14bb292a2e16b506133d191886110417df51 (patch)
tree      a889a29b8bbad8c1c64d339273bcdeea67d63299 /include/net/tcp.h
parent    7410191afcaca3a49bb29bfb5e15f81d7b336984 (diff)
parent    94bdc9785a1136cef6a982b042719783978e8a26 (diff)
download  linux-718e14bb292a2e16b506133d191886110417df51.tar.xz
Merge branch 'tcp-RACK-fast-recovery'
Yuchung Cheng says:

====================
tcp: RACK fast recovery

The patch set enables RACK loss detection (draft-ietf-tcpm-rack-01) to
trigger fast recovery with a reordering timer.

Previously RACK has been running in auxiliary mode, where it is used to
detect packet losses once recovery has been triggered by other
algorithms (e.g., FACK). By inspecting packet timestamps, RACK can start
ACK-driven repairs in a timely manner. A few similar heuristics are no
longer needed and are either removed or disabled to reduce the
complexity of the Linux TCP loss recovery engine:

1. FACK (Forward Acknowledgement)
2. Early Retransmit (RFC5827)
3. thin_dupack (fast recovery on a single DUPACK for thin streams)
4. NCR (Non-Congestion Robustness, RFC4653)
5. Forward Retransmit

After this change, Linux's loss recovery algorithms consist of

1. Conventional DUPACK threshold approach (RFC6675)
2. RACK and Tail Loss Probe (draft-ietf-tcpm-rack-01)
3. RTO plus F-RTO extension (RFC5682)

The patch set has been tested extensively on Google servers and
presented at several IETF meetings. The data suggest that RACK
successfully improves recovery performance:

https://www.ietf.org/proceedings/97/slides/slides-97-tcpm-draft-ietf-tcpm-rack-01.pdf
https://www.ietf.org/proceedings/96/slides/slides-96-tcpm-3.pdf
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
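For orientation, here is a small userspace sketch of the time-based rule this
merge enables. It is an assumption-laden illustration, not the kernel code:
the names rack_packet and rack_detect_loss are hypothetical, RTT handling is
omitted, and packets still inside the reordering window would in the real
implementation be handed to the new reordering timer (tcp_rack_reo_timeout()
in the hunks below). The idea: a packet is deemed lost once some packet sent
after it has been (s)acked and a small reordering window has elapsed.

/* Simplified RACK-style loss detection sketch (draft-ietf-tcpm-rack-01). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rack_packet {
	uint64_t xmit_time_us;	/* when this packet was transmitted */
	bool	 sacked;	/* selectively acknowledged? */
	bool	 lost;		/* marked lost by the sketch below */
};

/* Mark packets lost relative to the newest (s)acked transmission time.
 * Simplification: the kernel also factors in the RTT and, for packets
 * still within the window, arms a reordering timer instead of waiting
 * for the next ACK.
 */
static void rack_detect_loss(struct rack_packet *pkts, int n,
			     uint64_t newest_sacked_xmit_us,
			     uint64_t now_us, uint64_t reo_wnd_us)
{
	for (int i = 0; i < n; i++) {
		if (pkts[i].sacked || pkts[i].lost)
			continue;
		/* Sent before the newest delivered packet and the
		 * reordering window has already elapsed: deem it lost.
		 */
		if (pkts[i].xmit_time_us < newest_sacked_xmit_us &&
		    now_us - pkts[i].xmit_time_us >= reo_wnd_us)
			pkts[i].lost = true;
	}
}

int main(void)
{
	struct rack_packet pkts[3] = {
		{ .xmit_time_us = 1000 },		  /* old, unacked */
		{ .xmit_time_us = 2000, .sacked = true }, /* newer, sacked */
		{ .xmit_time_us = 3000 },		  /* newest, in flight */
	};

	rack_detect_loss(pkts, 3, 2000 /* newest sacked xmit */,
			 5000 /* now */, 2000 /* cf. TCP_REO_TIMEOUT_MIN */);

	for (int i = 0; i < 3; i++)
		printf("pkt %d lost=%d\n", i, pkts[i].lost);
	return 0;
}

Only the first packet is marked lost: a packet transmitted after it has been
sacked and more than the reordering window has passed since its transmission.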
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h  40
1 file changed, 11 insertions(+), 29 deletions(-)
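The hunks below also rename the single flag on sysctl_tcp_recovery from
TCP_RACK_LOST_RETRANS to TCP_RACK_LOSS_DETECTION and move the declaration next
to the other loss-recovery sysctls. A minimal sketch of how such a bit-flag
knob is typically tested; the helper name tcp_rack_loss_detection_enabled is
hypothetical and not part of this patch:

#include <stdbool.h>

/* Sketch only: the real knob is the kernel's sysctl_tcp_recovery. */
static int sysctl_tcp_recovery = 0x1;

#define TCP_RACK_LOSS_DETECTION 0x1	/* Use RACK to detect losses */

/* Hypothetical helper; the patch itself tests the bit inline. */
static inline bool tcp_rack_loss_detection_enabled(void)
{
	return sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
}

At runtime the knob is exposed to administrators as the net.ipv4.tcp_recovery
sysctl.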
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1da0aa724929..c55d65f74f7f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
+#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */
#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
@@ -261,6 +262,9 @@ extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
+extern int sysctl_tcp_recovery;
+#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
+
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
@@ -397,6 +401,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
@@ -541,6 +546,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
+void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
@@ -559,7 +565,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
const struct sk_buff *next_skb);
/* tcp_input.c */
-void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
@@ -1031,23 +1036,6 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}
-/* TCP early-retransmit (ER) is similar to but more conservative than
- * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
- */
-static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
-{
- struct net *net = sock_net((struct sock *)tp);
-
- tp->do_early_retrans = sysctl_tcp_early_retrans &&
- sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
- net->ipv4.sysctl_tcp_reordering == 3;
-}
-
-static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
-{
- tp->do_early_retrans = 0;
-}
-
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
@@ -1856,17 +1844,11 @@ void tcp_v4_init(void);
void tcp_init(void);
/* tcp_recovery.c */
-
-/* Flags to enable various loss recovery features. See below */
-extern int sysctl_tcp_recovery;
-
-/* Use TCP RACK to detect (some) tail and retransmit losses */
-#define TCP_RACK_LOST_RETRANS 0x1
-
-extern int tcp_rack_mark_lost(struct sock *sk);
-
-extern void tcp_rack_advance(struct tcp_sock *tp,
- const struct skb_mstamp *xmit_time, u8 sacked);
+extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
+extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+ const struct skb_mstamp *xmit_time,
+ const struct skb_mstamp *ack_time);
+extern void tcp_rack_reo_timeout(struct sock *sk);
/*
* Save and compile IPv4 options, return a pointer to it