author    Eric Dumazet <edumazet@google.com>    2018-04-18 21:43:15 +0300
committer David S. Miller <davem@davemloft.net> 2018-04-19 20:44:11 +0300
commit    88078d98d1bb085d72af8437707279e203524fa5 (patch)
tree      bf20f9ebb371aa14ce699ad9be0afc093babd04b /include/linux
parent    292eba02dbb4c41da840e462e51ee97d80d873d1 (diff)
download  linux-88078d98d1bb085d72af8437707279e203524fa5.tar.xz
net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends
After working on IP defragmentation lately, I found that some large packets
defeat the CHECKSUM_COMPLETE optimization because the NIC adds zero padding
to the last (small) fragment.

While removing the padding with pskb_trim_rcsum(), we set skb->ip_summed to
CHECKSUM_NONE, forcing a full csum validation, even if all prior fragments
had CHECKSUM_COMPLETE set.

We can instead compute the checksum of the part we are trimming, which is
usually smaller than the part we keep.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
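The body of the new helper is added in net/core/skbuff.c and therefore does
not appear in this include/linux-limited view. A minimal sketch of what the
message describes, assuming the existing skb_checksum() and csum_sub()
helpers, could look like:

    int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
    {
    	/* Sketch only: the real body lives in net/core/skbuff.c. */
    	if (skb->ip_summed == CHECKSUM_COMPLETE) {
    		int delta = skb->len - len;	/* size of the trimmed tail */

    		/* Checksum only the (usually small) part being removed and
    		 * subtract it from the running CHECKSUM_COMPLETE value, so
    		 * the skb keeps its hardware-computed checksum after trim.
    		 */
    		skb->csum = csum_sub(skb->csum,
    				     skb_checksum(skb, len, delta, 0));
    	}
    	return __pskb_trim(skb, len);
    }

Here skb_checksum(skb, len, delta, 0) sums the delta bytes starting at
offset len, i.e. exactly the padding being cut off, and csum_sub() removes
that contribution from skb->csum using one's-complement arithmetic.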
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/skbuff.h | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9065477ed255..d274059529eb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3131,6 +3131,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim
@@ -3144,9 +3145,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
-	return __pskb_trim(skb, len);
+	return pskb_trim_rcsum_slow(skb, len);
 }
 
 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
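
For context, a hedged sketch of a caller on the receive path; the function
name and setup are illustrative, not part of the patch:

    /* Illustrative caller: trimming NIC zero-padding from the last
     * fragment. rx_trim_padding() is a hypothetical name, not a kernel
     * function.
     */
    static int rx_trim_padding(struct sk_buff *skb, unsigned int real_len)
    {
    	int err = pskb_trim_rcsum(skb, real_len);

    	if (err)
    		return err;	/* __pskb_trim() may fail to reallocate */

    	/* Before this patch, a CHECKSUM_COMPLETE skb was downgraded to
    	 * CHECKSUM_NONE here; now skb->ip_summed is preserved and later
    	 * validation can still use skb->csum directly.
    	 */
    	return 0;
    }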