Diffstat (limited to 'net/sctp')
-rw-r--r--   net/sctp/associola.c |  3
-rw-r--r--   net/sctp/input.c     |  1
-rw-r--r--   net/sctp/output.c    |  6
-rw-r--r--   net/sctp/outqueue.c  |  8
-rw-r--r--   net/sctp/socket.c    | 76
-rw-r--r--   net/sctp/ulpqueue.c  |  2
6 files changed, 36 insertions, 60 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 297d9cf960b9..a827a1f562bf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 	/* Get the lowest pmtu of all the transports. */
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
 		if (t->pmtu_pending && t->dst) {
-			sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+			sctp_transport_update_pmtu(t,
+						   atomic_read(&t->mtu_info));
 			t->pmtu_pending = 0;
 		}
 		if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 9bbc5f92c941..5c36a99882ed 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 		return;
 
 	if (sock_owned_by_user(sk)) {
+		atomic_set(&t->mtu_info, pmtu);
 		asoc->pmtu_pending = 1;
 		t->pmtu_pending = 1;
 		return;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7f849b01ec8e..67939ad99c01 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 			sctp_assoc_sync_pmtu(asoc);
 	}
 
+	if (asoc->pmtu_pending) {
+		if (asoc->param_flags & SPP_PMTUD_ENABLE)
+			sctp_assoc_sync_pmtu(asoc);
+		asoc->pmtu_pending = 0;
+	}
+
 	/* If there a is a prepend chunk stick it on the list before
 	 * any other chunks get appended.
 	 */
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 42191ed9902b..9cb854b05342 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -385,9 +385,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
 		}
 
-		msg_len -= SCTP_DATA_SNDSIZE(chk) +
-			   sizeof(struct sk_buff) +
-			   sizeof(struct sctp_chunk);
+		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
 		if (msg_len <= 0)
 			break;
 	}
@@ -421,9 +419,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 		}
 
-		msg_len -= SCTP_DATA_SNDSIZE(chk) +
-			   sizeof(struct sk_buff) +
-			   sizeof(struct sctp_chunk);
+		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
 		sctp_chunk_free(chk);
 		if (msg_len <= 0)
 			break;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f73e9d38d5ba..fc0386e8ff23 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -83,7 +83,7 @@
 #include <net/sctp/stream_sched.h>
 
 /* Forward declarations for internal helper functions. */
-static int sctp_writeable(struct sock *sk);
+static bool sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 				size_t msg_len);
@@ -119,25 +119,10 @@ static void sctp_enter_memory_pressure(struct sock *sk)
 /* Get the sndbuf space available at the time on the association.  */
 static inline int sctp_wspace(struct sctp_association *asoc)
 {
-	int amt;
+	struct sock *sk = asoc->base.sk;
 
-	if (asoc->ep->sndbuf_policy)
-		amt = asoc->sndbuf_used;
-	else
-		amt = sk_wmem_alloc_get(asoc->base.sk);
-
-	if (amt >= asoc->base.sk->sk_sndbuf) {
-		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
-			amt = 0;
-		else {
-			amt = sk_stream_wspace(asoc->base.sk);
-			if (amt < 0)
-				amt = 0;
-		}
-	} else {
-		amt = asoc->base.sk->sk_sndbuf - amt;
-	}
-	return amt;
+	return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
+				       : sk_stream_wspace(sk);
 }
 
 /* Increment the used sndbuf space count of the corresponding association by
@@ -166,12 +151,9 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
 	skb_shinfo(chunk->skb)->destructor_arg = chunk;
 
-	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
-			     sizeof(struct sk_buff) +
-			     sizeof(struct sctp_chunk);
-
 	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
-	sk->sk_wmem_queued += chunk->skb->truesize;
+	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
+	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
 	sk_mem_charge(sk, chunk->skb->truesize);
 }
 
@@ -271,11 +253,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
 
 	spin_lock_bh(&sctp_assocs_id_lock);
 	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+		asoc = NULL;
 	spin_unlock_bh(&sctp_assocs_id_lock);
 
-	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
-		return NULL;
-
 	return asoc;
 }
 
@@ -1928,10 +1909,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 		asoc->pmtu_pending = 0;
 	}
 
-	if (sctp_wspace(asoc) < msg_len)
+	if (sctp_wspace(asoc) < (int)msg_len)
 		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
 
-	if (!sctp_wspace(asoc)) {
+	if (sctp_wspace(asoc) <= 0) {
 		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
 		if (err)
@@ -1946,8 +1927,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 		if (sp->strm_interleave) {
 			timeo = sock_sndtimeo(sk, 0);
 			err = sctp_wait_for_connect(asoc, &timeo);
-			if (err)
+			if (err) {
+				err = -ESRCH;
 				goto err;
+			}
 		} else {
 			wait_connect = true;
 		}
@@ -7100,14 +7083,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
 	}
 
 	policy = params.sprstat_policy;
-	if (policy & ~SCTP_PR_SCTP_MASK)
+	if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
 		goto out;
 
 	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
 	if (!asoc)
 		goto out;
 
-	if (policy == SCTP_PR_SCTP_NONE) {
+	if (policy & SCTP_PR_SCTP_ALL) {
 		params.sprstat_abandoned_unsent = 0;
 		params.sprstat_abandoned_sent = 0;
 		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7142,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
 	}
 
 	policy = params.sprstat_policy;
-	if (policy & ~SCTP_PR_SCTP_MASK)
+	if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
 		goto out;
 
 	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7158,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
 		goto out;
 	}
 
-	if (policy == SCTP_PR_SCTP_NONE) {
+	if (policy == SCTP_PR_SCTP_ALL) {
 		params.sprstat_abandoned_unsent = 0;
 		params.sprstat_abandoned_sent = 0;
 		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -8460,17 +8443,11 @@ static void sctp_wfree(struct sk_buff *skb)
 	struct sctp_association *asoc = chunk->asoc;
 	struct sock *sk = asoc->base.sk;
 
-	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
-			     sizeof(struct sk_buff) +
-			     sizeof(struct sctp_chunk);
-
-	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
-
-	/*
-	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
-	 */
-	sk->sk_wmem_queued -= skb->truesize;
 	sk_mem_uncharge(sk, skb->truesize);
+	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+	asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
+	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
+				      &sk->sk_wmem_alloc));
 
 	if (chunk->shkey) {
 		struct sctp_shared_key *shkey = chunk->shkey;
@@ -8544,7 +8521,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 			goto do_error;
 		if (signal_pending(current))
 			goto do_interrupted;
-		if (msg_len <= sctp_wspace(asoc))
+		if ((int)msg_len <= sctp_wspace(asoc))
 			break;
 
 		/* Let another process have a go.  Since we are going
@@ -8619,14 +8596,9 @@ void sctp_write_space(struct sock *sk)
  * UDP-style sockets or TCP-style sockets, this code should work.
  *  - Daisy
  */
-static int sctp_writeable(struct sock *sk)
+static bool sctp_writeable(struct sock *sk)
 {
-	int amt = 0;
-
-	amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
-	if (amt < 0)
-		amt = 0;
-	return amt;
+	return sk->sk_sndbuf > sk->sk_wmem_queued;
 }
 
 /* Wait for an association to go into ESTABLISHED state.  If timeout is 0,
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0b427100b0d4..331cc734e3db 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -459,7 +459,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 				 * element in the queue, then count it towards
 				 * possible PD.
 				 */
-				if (pos == ulpq->reasm.next) {
+				if (skb_queue_is_first(&ulpq->reasm, pos)) {
 					pd_first = pos;
 					pd_last = pos;
 					pd_len = pos->len;
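The net/sctp/socket.c hunks above rework send-buffer accounting: each queued chunk is now charged as its skb truesize plus sizeof(struct sctp_chunk), and with the per-association sndbuf policy the writable space becomes sk_sndbuf minus that charge, a value that can legitimately go negative. The sketch below is a minimal standalone userspace model of that arithmetic only; struct toy_assoc, toy_charge(), toy_wspace(), toy_writeable() and the numbers in main() are invented for illustration and are not kernel code.

/* Toy model of the reworked SCTP sndbuf accounting shown in the diff.
 * Illustrative only: the types and values are hypothetical; just the
 * arithmetic mirrors the per-association (sndbuf_policy) branch above.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_assoc {
	int sk_sndbuf;    /* configured send buffer limit (SO_SNDBUF) */
	int sndbuf_used;  /* bytes charged to this association        */
};

/* Per-chunk charge, as in the diff: skb truesize + chunk header size. */
static void toy_charge(struct toy_assoc *a, int skb_truesize, int chunk_hdr)
{
	a->sndbuf_used += skb_truesize + chunk_hdr;
}

/* Writable space for the per-association policy: limit minus charge.
 * The result is signed and may go below zero once truesize-based
 * charges exceed the configured limit.
 */
static int toy_wspace(const struct toy_assoc *a)
{
	return a->sk_sndbuf - a->sndbuf_used;
}

static bool toy_writeable(const struct toy_assoc *a)
{
	return toy_wspace(a) > 0;
}

int main(void)
{
	struct toy_assoc a = { .sk_sndbuf = 4096, .sndbuf_used = 0 };

	toy_charge(&a, 3800, 64);  /* first queued chunk */
	printf("wspace=%d writeable=%d\n", toy_wspace(&a), toy_writeable(&a));

	toy_charge(&a, 3800, 64);  /* second chunk drives the space negative */
	printf("wspace=%d writeable=%d\n", toy_wspace(&a), toy_writeable(&a));
	return 0;
}

This signed, possibly negative result also appears to be why the diff casts msg_len to (int) in the comparisons and replaces the old !sctp_wspace(asoc) test with a <= 0 check: once truesize-based charges can exceed sk_sndbuf, "no space left" has to cover negative values as well as zero.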