summaryrefslogtreecommitdiff
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2012-04-23 21:34:36 +0400
committerDavid S. Miller <davem@davemloft.net>2012-04-24 07:36:58 +0400
commit783c175f902b1ae011f12de45770e7912638ea1a (patch)
tree7da5c4b2280c15b4d54e13ed95c1351a14800018 /net/ipv4
parentd7ccf7c0a0585a126109a4b7c2a309184bfa4cba (diff)
downloadlinux-783c175f902b1ae011f12de45770e7912638ea1a.tar.xz
tcp: tcp_try_coalesce returns a boolean
This clarifies code intention, as suggested by David.

Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/tcp_input.c18
1 files changed, 9 insertions, 9 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c1c611b385a7..c93b0cbb7fc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4460,23 +4460,23 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
* to reduce overall memory use and queue lengths, if cost is small.
* Packets in ofo or receive queues can stay a long time.
* Better try to coalesce them right now to avoid future collapses.
- * Returns > 0 value if caller should free @from instead of queueing it
+ * Returns true if caller should free @from instead of queueing it
*/
-static int tcp_try_coalesce(struct sock *sk,
- struct sk_buff *to,
- struct sk_buff *from)
+static bool tcp_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from)
{
int len = from->len;
if (tcp_hdr(from)->fin)
- return 0;
+ return false;
if (len <= skb_tailroom(to)) {
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
merge:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
- return 1;
+ return true;
}
if (skb_headlen(from) == 0 &&
!skb_has_frag_list(to) &&
@@ -4499,7 +4499,7 @@ merge:
to->data_len += len;
goto merge;
}
- return 0;
+ return false;
}
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
@@ -4540,7 +4540,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq = TCP_SKB_CB(skb)->end_seq;
if (seq == TCP_SKB_CB(skb1)->end_seq) {
- if (tcp_try_coalesce(sk, skb1, skb) <= 0) {
+ if (!tcp_try_coalesce(sk, skb1, skb)) {
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
} else {
__kfree_skb(skb);
@@ -4672,7 +4672,7 @@ queue_and_out:
goto drop;
tail = skb_peek_tail(&sk->sk_receive_queue);
- eaten = tail ? tcp_try_coalesce(sk, tail, skb) : -1;
+ eaten = (tail && tcp_try_coalesce(sk, tail, skb)) ? 1 : 0;
if (eaten <= 0) {
skb_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);