Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f89e7fd59a4c..ac74ee085d74 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines touched per packet.
+ * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 64
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
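
The arithmetic behind the new comment is worth spelling out: with cache-line-aligned headroom, every byte RPS reads sits inside a single 64-byte line. The sketch below is not part of the patch; it checks the macro expansion and the byte counts in userspace. L1_CACHE_BYTES = 64 and the ternary max() are assumptions standing in for the kernel's architecture headers and the type-checked max() from <linux/kernel.h>.

	/* Sketch only, not from the patch: userspace check of the
	 * comment's arithmetic under the assumptions stated above. */
	#include <stdio.h>

	#define L1_CACHE_BYTES 64			/* assumed cache line size */
	#define max(a, b) ((a) > (b) ? (a) : (b))	/* stand-in for kernel max() */
	#define NET_SKB_PAD max(32, L1_CACHE_BYTES)

	int main(void)
	{
		/* Bytes get_rps_cpus() touches at the head of the packet:
		 * NET_IP_ALIGN(2) + ethernet(14) + IP(20/40) + ports(8) */
		int ipv4 = 2 + 14 + 20 + 8;	/* 44 bytes */
		int ipv6 = 2 + 14 + 40 + 8;	/* 64 bytes, exactly one line */

		printf("NET_SKB_PAD=%d ipv4=%d ipv6=%d\n",
		       NET_SKB_PAD, ipv4, ipv6);
		return 0;
	}

On an architecture with 32-byte cache lines, max(32, L1_CACHE_BYTES) evaluates to 32, so the historical 32-byte minimum headroom is preserved rather than wasting an extra 32 bytes per skb.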
@@ -2132,7 +2134,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
struct skb_shared_info *shinfo = skb_shinfo(skb);
- if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+ if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+ unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
return true;
}
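
To see what the added skb_is_nonlinear() test changes, here is a minimal userspace model; it is a sketch, not kernel code. 'struct fake_skb' is a hypothetical stand-in that flattens sk_buff and skb_shared_info into one struct, and 1448 is just an assumed MSS-like value. The point it demonstrates: a purely linear skb with gso_size set no longer triggers the LRO warning, while a frag-carrying skb still does.

	/* Sketch only, not from the patch: userspace model of the
	 * patched predicate, using the hypothetical fake_skb above. */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_skb {
		unsigned int data_len;		/* bytes in paged frags; 0 => linear */
		unsigned short gso_size;	/* set by both LRO and GSO */
		unsigned short gso_type;	/* set only when GSO is really wanted */
	};

	static bool skb_is_nonlinear(const struct fake_skb *skb)
	{
		return skb->data_len != 0;	/* same test the kernel helper uses */
	}

	/* Mirrors the patched skb_warn_if_lro(): gso_size on a purely
	 * linear skb is no longer treated as an LRO leftover. */
	static bool warn_if_lro(const struct fake_skb *skb)
	{
		return skb_is_nonlinear(skb) && skb->gso_size != 0 &&
		       skb->gso_type == 0;
	}

	int main(void)
	{
		struct fake_skb linear = { .data_len = 0,    .gso_size = 1448 };
		struct fake_skb lro    = { .data_len = 4096, .gso_size = 1448 };

		printf("linear: %d (warned before the patch)\n",
		       warn_if_lro(&linear));
		printf("lro:    %d (still warns)\n", warn_if_lro(&lro));
		return 0;
	}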