From b9fd683982c9d190cbccd8a32d885bf84bb4a12d Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:36 -0300 Subject: sctp: add sctp_packet_singleton Factor out the code for generating singletons. It's used only once, but helps to keep the context contained. The const variables are to ease the reading of subsequent calls in there. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index dee7cbd54831..300bd0dfc7c1 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -776,6 +776,20 @@ void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp) sctp_outq_flush(q, 0, gfp); } +static int sctp_packet_singleton(struct sctp_transport *transport, + struct sctp_chunk *chunk, gfp_t gfp) +{ + const struct sctp_association *asoc = transport->asoc; + const __u16 sport = asoc->base.bind_addr.port; + const __u16 dport = asoc->peer.port; + const __u32 vtag = asoc->peer.i.init_tag; + struct sctp_packet singleton; + + sctp_packet_init(&singleton, transport, sport, dport); + sctp_packet_config(&singleton, vtag, 0); + sctp_packet_append_chunk(&singleton, chunk); + return sctp_packet_transmit(&singleton, gfp); +} /* * Try to flush an outqueue. @@ -789,10 +803,7 @@ void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp) static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) { struct sctp_packet *packet; - struct sctp_packet singleton; struct sctp_association *asoc = q->asoc; - __u16 sport = asoc->base.bind_addr.port; - __u16 dport = asoc->peer.port; __u32 vtag = asoc->peer.i.init_tag; struct sctp_transport *transport = NULL; struct sctp_transport *new_transport; @@ -905,10 +916,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: - sctp_packet_init(&singleton, transport, sport, dport); - sctp_packet_config(&singleton, vtag, 0); - sctp_packet_append_chunk(&singleton, chunk); - error = sctp_packet_transmit(&singleton, gfp); + error = sctp_packet_singleton(transport, chunk, gfp); if (error < 0) { asoc->base.sk->sk_err = -error; return; -- cgit v1.2.3 From 0d634b0c94c9d8298f24a58e4af56c85dc06bb35 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:37 -0300 Subject: sctp: factor out sctp_outq_select_transport We had two spots doing such complex operation and they were very close to each other, a bit more tailored to here or there. This patch unifies these under the same function, sctp_outq_select_transport, which knows how to handle control chunks and original transmissions (but not retransmissions). Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/outqueue.c | 187 +++++++++++++++++++++++++--------------------------- 1 file changed, 90 insertions(+), 97 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 300bd0dfc7c1..bda50596d4bf 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -791,6 +791,90 @@ static int sctp_packet_singleton(struct sctp_transport *transport, return sctp_packet_transmit(&singleton, gfp); } +static bool sctp_outq_select_transport(struct sctp_chunk *chunk, + struct sctp_association *asoc, + struct sctp_transport **transport, + struct list_head *transport_list) +{ + struct sctp_transport *new_transport = chunk->transport; + struct sctp_transport *curr = *transport; + bool changed = false; + + if (!new_transport) { + if (!sctp_chunk_is_data(chunk)) { + /* + * If we have a prior transport pointer, see if + * the destination address of the chunk + * matches the destination address of the + * current transport. If not a match, then + * try to look up the transport with a given + * destination address. We do this because + * after processing ASCONFs, we may have new + * transports created. + */ + if (curr && sctp_cmp_addr_exact(&chunk->dest, + &curr->ipaddr)) + new_transport = curr; + else + new_transport = sctp_assoc_lookup_paddr(asoc, + &chunk->dest); + } + + /* if we still don't have a new transport, then + * use the current active path. + */ + if (!new_transport) + new_transport = asoc->peer.active_path; + } else { + __u8 type; + + switch (new_transport->state) { + case SCTP_INACTIVE: + case SCTP_UNCONFIRMED: + case SCTP_PF: + /* If the chunk is Heartbeat or Heartbeat Ack, + * send it to chunk->transport, even if it's + * inactive. + * + * 3.3.6 Heartbeat Acknowledgement: + * ... + * A HEARTBEAT ACK is always sent to the source IP + * address of the IP datagram containing the + * HEARTBEAT chunk to which this ack is responding. + * ... + * + * ASCONF_ACKs also must be sent to the source. + */ + type = chunk->chunk_hdr->type; + if (type != SCTP_CID_HEARTBEAT && + type != SCTP_CID_HEARTBEAT_ACK && + type != SCTP_CID_ASCONF_ACK) + new_transport = asoc->peer.active_path; + break; + default: + break; + } + } + + /* Are we switching transports? Take care of transport locks. */ + if (new_transport != curr) { + changed = true; + curr = new_transport; + *transport = curr; + if (list_empty(&curr->send_ready)) + list_add_tail(&curr->send_ready, transport_list); + + sctp_packet_config(&curr->packet, asoc->peer.i.init_tag, + asoc->peer.ecn_capable); + /* We've switched transports, so apply the + * Burst limit to the new transport. + */ + sctp_transport_burst_limited(curr); + } + + return changed; +} + /* * Try to flush an outqueue. * @@ -806,7 +890,6 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) struct sctp_association *asoc = q->asoc; __u32 vtag = asoc->peer.i.init_tag; struct sctp_transport *transport = NULL; - struct sctp_transport *new_transport; struct sctp_chunk *chunk, *tmp; enum sctp_xmit status; int error = 0; @@ -843,68 +926,12 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) list_del_init(&chunk->list); - /* Pick the right transport to use. */ - new_transport = chunk->transport; - - if (!new_transport) { - /* - * If we have a prior transport pointer, see if - * the destination address of the chunk - * matches the destination address of the - * current transport. If not a match, then - * try to look up the transport with a given - * destination address. 
We do this because - * after processing ASCONFs, we may have new - * transports created. - */ - if (transport && - sctp_cmp_addr_exact(&chunk->dest, - &transport->ipaddr)) - new_transport = transport; - else - new_transport = sctp_assoc_lookup_paddr(asoc, - &chunk->dest); - - /* if we still don't have a new transport, then - * use the current active path. - */ - if (!new_transport) - new_transport = asoc->peer.active_path; - } else if ((new_transport->state == SCTP_INACTIVE) || - (new_transport->state == SCTP_UNCONFIRMED) || - (new_transport->state == SCTP_PF)) { - /* If the chunk is Heartbeat or Heartbeat Ack, - * send it to chunk->transport, even if it's - * inactive. - * - * 3.3.6 Heartbeat Acknowledgement: - * ... - * A HEARTBEAT ACK is always sent to the source IP - * address of the IP datagram containing the - * HEARTBEAT chunk to which this ack is responding. - * ... - * - * ASCONF_ACKs also must be sent to the source. - */ - if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && - chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && - chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) - new_transport = asoc->peer.active_path; - } - - /* Are we switching transports? - * Take care of transport locks. + /* Pick the right transport to use. Should always be true for + * the first chunk as we don't have a transport by then. */ - if (new_transport != transport) { - transport = new_transport; - if (list_empty(&transport->send_ready)) { - list_add_tail(&transport->send_ready, - &transport_list); - } + if (sctp_outq_select_transport(chunk, asoc, &transport, + &transport_list)) packet = &transport->packet; - sctp_packet_config(packet, vtag, - asoc->peer.ecn_capable); - } switch (chunk->chunk_hdr->type) { /* @@ -1072,43 +1099,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) goto sctp_flush_out; } - /* If there is a specified transport, use it. - * Otherwise, we want to use the active path. - */ - new_transport = chunk->transport; - if (!new_transport || - ((new_transport->state == SCTP_INACTIVE) || - (new_transport->state == SCTP_UNCONFIRMED) || - (new_transport->state == SCTP_PF))) - new_transport = asoc->peer.active_path; - if (new_transport->state == SCTP_UNCONFIRMED) { - WARN_ONCE(1, "Attempt to send packet on unconfirmed path."); - sctp_sched_dequeue_done(q, chunk); - sctp_chunk_fail(chunk, 0); - sctp_chunk_free(chunk); - continue; - } - - /* Change packets if necessary. */ - if (new_transport != transport) { - transport = new_transport; - - /* Schedule to have this transport's - * packet flushed. - */ - if (list_empty(&transport->send_ready)) { - list_add_tail(&transport->send_ready, - &transport_list); - } - + if (sctp_outq_select_transport(chunk, asoc, &transport, + &transport_list)) packet = &transport->packet; - sctp_packet_config(packet, vtag, - asoc->peer.ecn_capable); - /* We've switched transports, so apply the - * Burst limit to the new transport. - */ - sctp_transport_burst_limited(transport); - } pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " "skb->users:%d\n", -- cgit v1.2.3 From 7a0b9df65a9e291b614850891af485d1121ef54c Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:38 -0300 Subject: sctp: move the flush of ctrl chunks into its own function Named sctp_outq_flush_ctrl and, with that, keep the contexts contained. One small fix embedded is the reset of one_packet at every iteration. This allows bundling of some control chunks in case they were preceeded by another control chunk that cannot be bundled. 
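A small user-space sketch of that effect may help; the chunk types and the single "must go alone" rule below are invented stand-ins, so this only models the flag reset, not the kernel chunk handling.

/*
 * Hypothetical model of the one_packet reset: without the per-iteration
 * reset, one non-bundlable chunk forces every later control chunk into
 * its own packet as well.
 */
#include <stdbool.h>
#include <stdio.h>

enum ctrl { CT_SACK, CT_SHUTDOWN, CT_HEARTBEAT, CT_RECONF };

static bool must_go_alone(enum ctrl c)
{
        return c == CT_SHUTDOWN;        /* assumed rule, for illustration */
}

static void flush_ctrl(const enum ctrl *q, int n, bool reset_each)
{
        int one_packet = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (reset_each)
                        one_packet = 0;         /* the small fix above */
                if (must_go_alone(q[i]))
                        one_packet = 1;
                printf("  chunk %d: %s\n", i,
                       one_packet ? "own packet" : "bundling allowed");
        }
}

int main(void)
{
        enum ctrl q[] = { CT_SACK, CT_SHUTDOWN, CT_HEARTBEAT, CT_RECONF };

        puts("old behaviour (no reset):");
        flush_ctrl(q, 4, false);
        puts("new behaviour (reset per chunk):");
        flush_ctrl(q, 4, true);
        return 0;
}

Run as-is, the first loop shows every chunk after the SHUTDOWN forced into its own packet, while the reset variant only isolates the SHUTDOWN itself.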
Other than this, it has the same behavior. Changes since v2: - Fixed panic reported by kbuild test robot if building with only up to this patch applied, due to bad parameter to sctp_outq_select_transport and by not initializing packet after calling sctp_outq_flush_ctrl. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 92 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 56 insertions(+), 36 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index bda50596d4bf..92f14f51edf2 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -875,45 +875,21 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk, return changed; } -/* - * Try to flush an outqueue. - * - * Description: Send everything in q which we legally can, subject to - * congestion limitations. - * * Note: This function can be called from multiple contexts so appropriate - * locking concerns must be made. Today we use the sock lock to protect - * this function. - */ -static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) +static void sctp_outq_flush_ctrl(struct sctp_outq *q, + struct sctp_transport **_transport, + struct list_head *transport_list, + gfp_t gfp) { - struct sctp_packet *packet; + struct sctp_transport *transport = *_transport; struct sctp_association *asoc = q->asoc; - __u32 vtag = asoc->peer.i.init_tag; - struct sctp_transport *transport = NULL; + struct sctp_packet *packet = NULL; struct sctp_chunk *chunk, *tmp; enum sctp_xmit status; - int error = 0; - int start_timer = 0; - int one_packet = 0; - - /* These transports have chunks to send. */ - struct list_head transport_list; - struct list_head *ltransport; - - INIT_LIST_HEAD(&transport_list); - packet = NULL; - - /* - * 6.10 Bundling - * ... - * When bundling control chunks with DATA chunks, an - * endpoint MUST place control chunks first in the outbound - * SCTP packet. The transmitter MUST transmit DATA chunks - * within a SCTP packet in increasing order of TSN. - * ... - */ + int one_packet, error; list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { + one_packet = 0; + /* RFC 5061, 5.3 * F1) This means that until such time as the ASCONF * containing the add is acknowledged, the sender MUST @@ -929,9 +905,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* Pick the right transport to use. Should always be true for * the first chunk as we don't have a transport by then. */ - if (sctp_outq_select_transport(chunk, asoc, &transport, - &transport_list)) + if (sctp_outq_select_transport(chunk, asoc, _transport, + transport_list)) { + transport = *_transport; packet = &transport->packet; + } switch (chunk->chunk_hdr->type) { /* @@ -954,6 +932,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) if (sctp_test_T_bit(chunk)) packet->vtag = asoc->c.my_vtag; /* fallthru */ + /* The following chunks are "response" chunks, i.e. * they are generated in response to something we * received. 
If we are sending these, then we can @@ -979,7 +958,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) case SCTP_CID_RECONF: status = sctp_packet_transmit_chunk(packet, chunk, one_packet, gfp); - if (status != SCTP_XMIT_OK) { + if (status != SCTP_XMIT_OK) { /* put the chunk back */ list_add(&chunk->list, &q->control_chunk_list); break; @@ -1006,6 +985,47 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) BUG(); } } +} + +/* + * Try to flush an outqueue. + * + * Description: Send everything in q which we legally can, subject to + * congestion limitations. + * * Note: This function can be called from multiple contexts so appropriate + * locking concerns must be made. Today we use the sock lock to protect + * this function. + */ +static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) +{ + struct sctp_packet *packet; + struct sctp_association *asoc = q->asoc; + __u32 vtag = asoc->peer.i.init_tag; + struct sctp_transport *transport = NULL; + struct sctp_chunk *chunk; + enum sctp_xmit status; + int error = 0; + int start_timer = 0; + + /* These transports have chunks to send. */ + struct list_head transport_list; + struct list_head *ltransport; + + INIT_LIST_HEAD(&transport_list); + packet = NULL; + + /* + * 6.10 Bundling + * ... + * When bundling control chunks with DATA chunks, an + * endpoint MUST place control chunks first in the outbound + * SCTP packet. The transmitter MUST transmit DATA chunks + * within a SCTP packet in increasing order of TSN. + * ... + */ + + sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp); + packet = &transport->packet; if (q->asoc->src_out_of_asoc_ok) goto sctp_flush_out; -- cgit v1.2.3 From 96e0418e812e9c30211b55ffcddc5f03bfd96919 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:39 -0300 Subject: sctp: move outq data rtx code out of sctp_outq_flush This patch renames current sctp_outq_flush_rtx to __sctp_outq_flush_rtx and create a new sctp_outq_flush_rtx, with the code that was on sctp_outq_flush. Again, the idea is to have functions with small and defined objectives. Yes, there is an open-coded path selection in the now sctp_outq_flush_rtx. That is kept as is for now because it may be very different when we implement retransmission path selection algorithms for CMT-SCTP. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 101 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 58 insertions(+), 43 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 92f14f51edf2..49e80bf2ade7 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -601,14 +601,14 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, /* * Transmit DATA chunks on the retransmit queue. Upon return from - * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which + * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which * need to be transmitted by the caller. * We assume that pkt->transport has already been set. * * The return value is a normal kernel error return value. 
*/ -static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, - int rtx_timeout, int *start_timer) +static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, + int rtx_timeout, int *start_timer) { struct sctp_transport *transport = pkt->transport; struct sctp_chunk *chunk, *chunk1; @@ -987,6 +987,57 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, } } +/* Returns false if new data shouldn't be sent */ +static bool sctp_outq_flush_rtx(struct sctp_outq *q, + struct sctp_transport **_transport, + struct list_head *transport_list, + int rtx_timeout) +{ + struct sctp_transport *transport = *_transport; + struct sctp_packet *packet = transport ? &transport->packet : NULL; + struct sctp_association *asoc = q->asoc; + int error, start_timer = 0; + + if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) + return false; + + if (transport != asoc->peer.retran_path) { + /* Switch transports & prepare the packet. */ + transport = asoc->peer.retran_path; + *_transport = transport; + + if (list_empty(&transport->send_ready)) + list_add_tail(&transport->send_ready, + transport_list); + + packet = &transport->packet; + sctp_packet_config(packet, asoc->peer.i.init_tag, + asoc->peer.ecn_capable); + } + + error = __sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer); + if (error < 0) + asoc->base.sk->sk_err = -error; + + if (start_timer) { + sctp_transport_reset_t3_rtx(transport); + transport->last_time_sent = jiffies; + } + + /* This can happen on COOKIE-ECHO resend. Only + * one chunk can get bundled with a COOKIE-ECHO. + */ + if (packet->has_cookie_echo) + return false; + + /* Don't send new data if there is still data + * waiting to retransmit. + */ + if (!list_empty(&q->retransmit)) + return false; + + return true; +} /* * Try to flush an outqueue. * @@ -1000,12 +1051,10 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) { struct sctp_packet *packet; struct sctp_association *asoc = q->asoc; - __u32 vtag = asoc->peer.i.init_tag; struct sctp_transport *transport = NULL; struct sctp_chunk *chunk; enum sctp_xmit status; int error = 0; - int start_timer = 0; /* These transports have chunks to send. */ struct list_head transport_list; @@ -1053,45 +1102,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) * current cwnd). */ if (!list_empty(&q->retransmit)) { - if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) - goto sctp_flush_out; - if (transport == asoc->peer.retran_path) - goto retran; - - /* Switch transports & prepare the packet. */ - - transport = asoc->peer.retran_path; - - if (list_empty(&transport->send_ready)) { - list_add_tail(&transport->send_ready, - &transport_list); - } - + if (!sctp_outq_flush_rtx(q, &transport, &transport_list, + rtx_timeout)) + break; + /* We may have switched current transport */ packet = &transport->packet; - sctp_packet_config(packet, vtag, - asoc->peer.ecn_capable); - retran: - error = sctp_outq_flush_rtx(q, packet, - rtx_timeout, &start_timer); - if (error < 0) - asoc->base.sk->sk_err = -error; - - if (start_timer) { - sctp_transport_reset_t3_rtx(transport); - transport->last_time_sent = jiffies; - } - - /* This can happen on COOKIE-ECHO resend. Only - * one chunk can get bundled with a COOKIE-ECHO. - */ - if (packet->has_cookie_echo) - goto sctp_flush_out; - - /* Don't send new data if there is still data - * waiting to retransmit. 
- */ - if (!list_empty(&q->retransmit)) - goto sctp_flush_out; } /* Apply Max.Burst limitation to the current transport in -- cgit v1.2.3 From cb93cc5d06d9b1016326376a4980d11a9040afd2 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:40 -0300 Subject: sctp: move flushing of data chunks out of sctp_outq_flush To the new sctp_outq_flush_data. Again, smaller functions and with well defined objectives. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 149 ++++++++++++++++++++++++++-------------------------- 1 file changed, 75 insertions(+), 74 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 49e80bf2ade7..bfa2e43dfd31 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1038,46 +1038,17 @@ static bool sctp_outq_flush_rtx(struct sctp_outq *q, return true; } -/* - * Try to flush an outqueue. - * - * Description: Send everything in q which we legally can, subject to - * congestion limitations. - * * Note: This function can be called from multiple contexts so appropriate - * locking concerns must be made. Today we use the sock lock to protect - * this function. - */ -static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) + +static void sctp_outq_flush_data(struct sctp_outq *q, + struct sctp_transport **_transport, + struct list_head *transport_list, + int rtx_timeout, gfp_t gfp) { - struct sctp_packet *packet; + struct sctp_transport *transport = *_transport; + struct sctp_packet *packet = transport ? &transport->packet : NULL; struct sctp_association *asoc = q->asoc; - struct sctp_transport *transport = NULL; struct sctp_chunk *chunk; enum sctp_xmit status; - int error = 0; - - /* These transports have chunks to send. */ - struct list_head transport_list; - struct list_head *ltransport; - - INIT_LIST_HEAD(&transport_list); - packet = NULL; - - /* - * 6.10 Bundling - * ... - * When bundling control chunks with DATA chunks, an - * endpoint MUST place control chunks first in the outbound - * SCTP packet. The transmitter MUST transmit DATA chunks - * within a SCTP packet in increasing order of TSN. - * ... - */ - - sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp); - packet = &transport->packet; - - if (q->asoc->src_out_of_asoc_ok) - goto sctp_flush_out; /* Is it OK to send data chunks? */ switch (asoc->state) { @@ -1102,10 +1073,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) * current cwnd). */ if (!list_empty(&q->retransmit)) { - if (!sctp_outq_flush_rtx(q, &transport, &transport_list, + if (!sctp_outq_flush_rtx(q, _transport, transport_list, rtx_timeout)) break; /* We may have switched current transport */ + transport = *_transport; packet = &transport->packet; } @@ -1131,12 +1103,14 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { sctp_outq_head_data(q, chunk); - goto sctp_flush_out; + break; } - if (sctp_outq_select_transport(chunk, asoc, &transport, - &transport_list)) + if (sctp_outq_select_transport(chunk, asoc, _transport, + transport_list)) { + transport = *_transport; packet = &transport->packet; + } pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " "skb->users:%d\n", @@ -1148,8 +1122,10 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* Add the chunk to the packet. 
*/ status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); - switch (status) { + case SCTP_XMIT_OK: + break; + case SCTP_XMIT_PMTU_FULL: case SCTP_XMIT_RWND_FULL: case SCTP_XMIT_DELAY: @@ -1161,41 +1137,25 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) status); sctp_outq_head_data(q, chunk); - goto sctp_flush_out; - - case SCTP_XMIT_OK: - /* The sender is in the SHUTDOWN-PENDING state, - * The sender MAY set the I-bit in the DATA - * chunk header. - */ - if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) - chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; - if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) - asoc->stats.ouodchunks++; - else - asoc->stats.oodchunks++; - - /* Only now it's safe to consider this - * chunk as sent, sched-wise. - */ - sctp_sched_dequeue_done(q, chunk); - - break; - - default: - BUG(); + return; } - /* BUG: We assume that the sctp_packet_transmit() - * call below will succeed all the time and add the - * chunk to the transmitted list and restart the - * timers. - * It is possible that the call can fail under OOM - * conditions. - * - * Is this really a problem? Won't this behave - * like a lost TSN? + /* The sender is in the SHUTDOWN-PENDING state, + * The sender MAY set the I-bit in the DATA + * chunk header. */ + if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) + chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + asoc->stats.ouodchunks++; + else + asoc->stats.oodchunks++; + + /* Only now it's safe to consider this + * chunk as sent, sched-wise. + */ + sctp_sched_dequeue_done(q, chunk); + list_add_tail(&chunk->transmitted_list, &transport->transmitted); @@ -1206,7 +1166,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) * COOKIE-ECHO chunk. */ if (packet->has_cookie_echo) - goto sctp_flush_out; + break; } break; @@ -1214,6 +1174,47 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* Do nothing. */ break; } +} + +/* + * Try to flush an outqueue. + * + * Description: Send everything in q which we legally can, subject to + * congestion limitations. + * * Note: This function can be called from multiple contexts so appropriate + * locking concerns must be made. Today we use the sock lock to protect + * this function. + */ +static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) +{ + struct sctp_packet *packet; + struct sctp_association *asoc = q->asoc; + struct sctp_transport *transport = NULL; + int error = 0; + + /* These transports have chunks to send. */ + struct list_head transport_list; + struct list_head *ltransport; + + INIT_LIST_HEAD(&transport_list); + packet = NULL; + + /* + * 6.10 Bundling + * ... + * When bundling control chunks with DATA chunks, an + * endpoint MUST place control chunks first in the outbound + * SCTP packet. The transmitter MUST transmit DATA chunks + * within a SCTP packet in increasing order of TSN. + * ... + */ + + sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp); + + if (q->asoc->src_out_of_asoc_ok) + goto sctp_flush_out; + + sctp_outq_flush_data(q, &transport, &transport_list, rtx_timeout, gfp); sctp_flush_out: -- cgit v1.2.3 From 4bf21b61f2bb3611ddb4fb75170c83905dd5c29e Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:41 -0300 Subject: sctp: move transport flush code out of sctp_outq_flush To the new sctp_outq_flush_transports. Comment on Nagle is outdated and removed. 
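Before the Nagle detail below, one recurring shape in the helpers extracted so far (sctp_outq_flush_ctrl, sctp_outq_flush_rtx, sctp_outq_flush_data) deserves a quick illustration: they report the currently selected transport back to the caller through a double pointer. The sketch below keeps only that pointer-passing shape; the types, names and selection rule are invented, not the kernel API.

/*
 * Stand-alone sketch of the "struct sctp_transport **" out-parameter used
 * by the new flush helpers. Returns from select_transport() tell the
 * caller whether the packet needs to be reconfigured for a new path.
 */
#include <stddef.h>
#include <stdio.h>

struct transport {
        const char *name;
        int configured;
};

/* Returns 1 when the caller switched transports and must reconfigure. */
static int select_transport(struct transport **curr, struct transport *want)
{
        if (*curr == want)
                return 0;
        *curr = want;
        return 1;
}

static void flush_step(struct transport **curr, struct transport *want)
{
        if (select_transport(curr, want)) {
                (*curr)->configured = 1;        /* cf. sctp_packet_config() */
                printf("switched to %s\n", (*curr)->name);
        } else {
                printf("still on %s\n", (*curr)->name);
        }
}

int main(void)
{
        struct transport a = { "path-A", 0 };
        struct transport *curr = NULL;  /* no transport picked yet */

        flush_step(&curr, &a);  /* control chunks: pick and configure */
        flush_step(&curr, &a);  /* data chunks: still on the same path */
        return 0;
}
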
Nagle is performed earlier, while checking if the chunk fits the packet: if the outq length is not enough to fill the packet, it returns SCTP_XMIT_DELAY. So by when it gets to sctp_outq_flush_transports, it has to go through all enlisted transports. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 56 +++++++++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 30 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index bfa2e43dfd31..44465e64857b 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1176,6 +1176,29 @@ static void sctp_outq_flush_data(struct sctp_outq *q, } } +static void sctp_outq_flush_transports(struct sctp_outq *q, + struct list_head *transport_list, + gfp_t gfp) +{ + struct list_head *ltransport; + struct sctp_packet *packet; + struct sctp_transport *t; + int error = 0; + + while ((ltransport = sctp_list_dequeue(transport_list)) != NULL) { + t = list_entry(ltransport, struct sctp_transport, send_ready); + packet = &t->packet; + if (!sctp_packet_empty(packet)) { + error = sctp_packet_transmit(packet, gfp); + if (error < 0) + q->asoc->base.sk->sk_err = -error; + } + + /* Clear the burst limited state, if any */ + sctp_transport_burst_reset(t); + } +} + /* * Try to flush an outqueue. * @@ -1187,17 +1210,10 @@ static void sctp_outq_flush_data(struct sctp_outq *q, */ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) { - struct sctp_packet *packet; - struct sctp_association *asoc = q->asoc; + /* Current transport being used. It's NOT the same as curr active one */ struct sctp_transport *transport = NULL; - int error = 0; - /* These transports have chunks to send. */ - struct list_head transport_list; - struct list_head *ltransport; - - INIT_LIST_HEAD(&transport_list); - packet = NULL; + LIST_HEAD(transport_list); /* * 6.10 Bundling @@ -1218,27 +1234,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) sctp_flush_out: - /* Before returning, examine all the transports touched in - * this call. Right now, we bluntly force clear all the - * transports. Things might change after we implement Nagle. - * But such an examination is still required. - * - * --xguo - */ - while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) { - struct sctp_transport *t = list_entry(ltransport, - struct sctp_transport, - send_ready); - packet = &t->packet; - if (!sctp_packet_empty(packet)) { - error = sctp_packet_transmit(packet, gfp); - if (error < 0) - asoc->base.sk->sk_err = -error; - } - - /* Clear the burst limited state, if any */ - sctp_transport_burst_reset(t); - } + sctp_outq_flush_transports(q, &transport_list, gfp); } /* Update unack_data based on the incoming SACK chunk */ -- cgit v1.2.3 From 6605f694823965fe53ce761dad3aaf965861dc7e Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:42 -0300 Subject: sctp: make use of gfp on retransmissions Retransmissions may be triggered when in user context, so lets make use of gfp. Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/outqueue.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 44465e64857b..e1632b8e2900 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -608,7 +608,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, * The return value is a normal kernel error return value. */ static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, - int rtx_timeout, int *start_timer) + int rtx_timeout, int *start_timer, gfp_t gfp) { struct sctp_transport *transport = pkt->transport; struct sctp_chunk *chunk, *chunk1; @@ -684,12 +684,12 @@ redo: * control chunks are already freed so there * is nothing we can do. */ - sctp_packet_transmit(pkt, GFP_ATOMIC); + sctp_packet_transmit(pkt, gfp); goto redo; } /* Send this packet. */ - error = sctp_packet_transmit(pkt, GFP_ATOMIC); + error = sctp_packet_transmit(pkt, gfp); /* If we are retransmitting, we should only * send a single packet. @@ -705,7 +705,7 @@ redo: case SCTP_XMIT_RWND_FULL: /* Send this packet. */ - error = sctp_packet_transmit(pkt, GFP_ATOMIC); + error = sctp_packet_transmit(pkt, gfp); /* Stop sending DATA as there is no more room * at the receiver. @@ -715,7 +715,7 @@ redo: case SCTP_XMIT_DELAY: /* Send this packet. */ - error = sctp_packet_transmit(pkt, GFP_ATOMIC); + error = sctp_packet_transmit(pkt, gfp); /* Stop sending DATA because of nagle delay. */ done = 1; @@ -991,7 +991,7 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, static bool sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_transport **_transport, struct list_head *transport_list, - int rtx_timeout) + int rtx_timeout, gfp_t gfp) { struct sctp_transport *transport = *_transport; struct sctp_packet *packet = transport ? &transport->packet : NULL; @@ -1015,7 +1015,8 @@ static bool sctp_outq_flush_rtx(struct sctp_outq *q, asoc->peer.ecn_capable); } - error = __sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer); + error = __sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer, + gfp); if (error < 0) asoc->base.sk->sk_err = -error; @@ -1074,7 +1075,7 @@ static void sctp_outq_flush_data(struct sctp_outq *q, */ if (!list_empty(&q->retransmit)) { if (!sctp_outq_flush_rtx(q, _transport, transport_list, - rtx_timeout)) + rtx_timeout, gfp)) break; /* We may have switched current transport */ transport = *_transport; -- cgit v1.2.3 From 4fdbb0efb9de51295071fdf968d476c1f0605d21 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Mon, 14 May 2018 14:34:43 -0300 Subject: sctp: rework switch cases in sctp_outq_flush_data Remove an inner one, which tended to be error prone due to the cascading and it can be replaced by a simple if (). Rework the outer one so that the actual flush code is not inside it. Now we first validate if we can or cannot send data, return if not, and then the flush code. Suggested-by: Xin Long Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/outqueue.c | 191 +++++++++++++++++++++++++--------------------------- 1 file changed, 93 insertions(+), 98 deletions(-) (limited to 'net/sctp/outqueue.c') diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index e1632b8e2900..e9c22b3db11c 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1058,122 +1058,117 @@ static void sctp_outq_flush_data(struct sctp_outq *q, * chunk. 
*/ if (!packet || !packet->has_cookie_echo) - break; + return; /* fallthru */ case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: - /* - * RFC 2960 6.1 Transmission of DATA Chunks - * - * C) When the time comes for the sender to transmit, - * before sending new DATA chunks, the sender MUST - * first transmit any outstanding DATA chunks which - * are marked for retransmission (limited by the - * current cwnd). - */ - if (!list_empty(&q->retransmit)) { - if (!sctp_outq_flush_rtx(q, _transport, transport_list, - rtx_timeout, gfp)) - break; - /* We may have switched current transport */ - transport = *_transport; - packet = &transport->packet; - } + break; - /* Apply Max.Burst limitation to the current transport in - * case it will be used for new data. We are going to - * rest it before we return, but we want to apply the limit - * to the currently queued data. - */ - if (transport) - sctp_transport_burst_limited(transport); - - /* Finally, transmit new packets. */ - while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { - __u32 sid = ntohs(chunk->subh.data_hdr->stream); - - /* Has this chunk expired? */ - if (sctp_chunk_abandoned(chunk)) { - sctp_sched_dequeue_done(q, chunk); - sctp_chunk_fail(chunk, 0); - sctp_chunk_free(chunk); - continue; - } + default: + /* Do nothing. */ + return; + } - if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { - sctp_outq_head_data(q, chunk); - break; - } + /* + * RFC 2960 6.1 Transmission of DATA Chunks + * + * C) When the time comes for the sender to transmit, + * before sending new DATA chunks, the sender MUST + * first transmit any outstanding DATA chunks which + * are marked for retransmission (limited by the + * current cwnd). + */ + if (!list_empty(&q->retransmit)) { + if (!sctp_outq_flush_rtx(q, _transport, transport_list, + rtx_timeout, gfp)) + return; + /* We may have switched current transport */ + transport = *_transport; + packet = &transport->packet; + } - if (sctp_outq_select_transport(chunk, asoc, _transport, - transport_list)) { - transport = *_transport; - packet = &transport->packet; - } + /* Apply Max.Burst limitation to the current transport in + * case it will be used for new data. We are going to + * rest it before we return, but we want to apply the limit + * to the currently queued data. + */ + if (transport) + sctp_transport_burst_limited(transport); - pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " - "skb->users:%d\n", - __func__, q, chunk, chunk && chunk->chunk_hdr ? - sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : - "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), - chunk->skb ? chunk->skb->head : NULL, chunk->skb ? - refcount_read(&chunk->skb->users) : -1); - - /* Add the chunk to the packet. */ - status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); - switch (status) { - case SCTP_XMIT_OK: - break; + /* Finally, transmit new packets. */ + while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { + __u32 sid = ntohs(chunk->subh.data_hdr->stream); - case SCTP_XMIT_PMTU_FULL: - case SCTP_XMIT_RWND_FULL: - case SCTP_XMIT_DELAY: - /* We could not append this chunk, so put - * the chunk back on the output queue. - */ - pr_debug("%s: could not transmit tsn:0x%x, status:%d\n", - __func__, ntohl(chunk->subh.data_hdr->tsn), - status); + /* Has this chunk expired? 
*/ + if (sctp_chunk_abandoned(chunk)) { + sctp_sched_dequeue_done(q, chunk); + sctp_chunk_fail(chunk, 0); + sctp_chunk_free(chunk); + continue; + } - sctp_outq_head_data(q, chunk); - return; - } + if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { + sctp_outq_head_data(q, chunk); + break; + } - /* The sender is in the SHUTDOWN-PENDING state, - * The sender MAY set the I-bit in the DATA - * chunk header. - */ - if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) - chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; - if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) - asoc->stats.ouodchunks++; - else - asoc->stats.oodchunks++; + if (sctp_outq_select_transport(chunk, asoc, _transport, + transport_list)) { + transport = *_transport; + packet = &transport->packet; + } - /* Only now it's safe to consider this - * chunk as sent, sched-wise. + pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " + "skb->users:%d\n", + __func__, q, chunk, chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), + chunk->skb ? chunk->skb->head : NULL, chunk->skb ? + refcount_read(&chunk->skb->users) : -1); + + /* Add the chunk to the packet. */ + status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); + if (status != SCTP_XMIT_OK) { + /* We could not append this chunk, so put + * the chunk back on the output queue. */ - sctp_sched_dequeue_done(q, chunk); + pr_debug("%s: could not transmit tsn:0x%x, status:%d\n", + __func__, ntohl(chunk->subh.data_hdr->tsn), + status); - list_add_tail(&chunk->transmitted_list, - &transport->transmitted); + sctp_outq_head_data(q, chunk); + break; + } - sctp_transport_reset_t3_rtx(transport); - transport->last_time_sent = jiffies; + /* The sender is in the SHUTDOWN-PENDING state, + * The sender MAY set the I-bit in the DATA + * chunk header. + */ + if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) + chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + asoc->stats.ouodchunks++; + else + asoc->stats.oodchunks++; - /* Only let one DATA chunk get bundled with a - * COOKIE-ECHO chunk. - */ - if (packet->has_cookie_echo) - break; - } - break; + /* Only now it's safe to consider this + * chunk as sent, sched-wise. + */ + sctp_sched_dequeue_done(q, chunk); - default: - /* Do nothing. */ - break; + list_add_tail(&chunk->transmitted_list, + &transport->transmitted); + + sctp_transport_reset_t3_rtx(transport); + transport->last_time_sent = jiffies; + + /* Only let one DATA chunk get bundled with a + * COOKIE-ECHO chunk. + */ + if (packet->has_cookie_echo) + break; } } -- cgit v1.2.3
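Taken together, the series leaves sctp_outq_flush() as a thin dispatcher. The rough shape of the resulting control flow is modelled below with stubbed-out helpers and an invented struct in place of the kernel types; only the call order is taken from the patches above, everything else is a placeholder.

/*
 * Stubbed-out outline of the flush path after this series. Only the call
 * order mirrors the patches; the struct and helper bodies are placeholders,
 * not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct outq {
        bool src_out_of_asoc_ok;
};

static void flush_ctrl(struct outq *q)
{
        puts("1. bundle control chunks first (the 6.10 Bundling rule)");
}

static void flush_data(struct outq *q)
{
        puts("2. retransmit queue, then new DATA, per transport");
}

static void flush_transports(struct outq *q)
{
        puts("3. transmit every enlisted packet, reset burst state");
}

static void outq_flush(struct outq *q)
{
        flush_ctrl(q);

        /* DATA is skipped entirely for out-of-association sends. */
        if (q->src_out_of_asoc_ok)
                goto flush_out;

        flush_data(q);

flush_out:
        flush_transports(q);
}

int main(void)
{
        struct outq q = { .src_out_of_asoc_ok = false };

        outq_flush(&q);
        return 0;
}

The real helpers additionally take the transport double pointer, the transport_list and the gfp flags discussed above; the stub keeps only the ordering: control chunks first, DATA skipped for out-of-association sends, and every touched transport transmitted and burst-reset at the end.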