summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  net/core/skbuff.c  33
1 file changed, 9 insertions, 24 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e50f2d4867c1..c5441154795c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -106,10 +106,9 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
GRO_MAX_HEAD_PAD))
-/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
- * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
- * size, and we can differentiate heads from skb_small_head_cache
- * vs system slabs by looking at their size (skb_end_offset()).
+/* SKB_SMALL_HEAD_CACHE_SIZE is the object size used for the skbuff_small_head
+ * kmem_cache. It is deliberately kept non-power-of-2 so that it can never
+ * collide with a generic kmalloc bucket size (see SKB_SMALL_HEAD_SIZE above).
*/
#define SKB_SMALL_HEAD_CACHE_SIZE \
(is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
@@ -1071,7 +1070,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
return 0;
}
-static void skb_kfree_head(void *head, unsigned int end_offset)
+static void skb_kfree_head(void *head)
{
kfree(head);
}
@@ -1085,7 +1084,7 @@ static void skb_free_head(struct sk_buff *skb)
return;
skb_free_frag(head);
} else {
- skb_kfree_head(head, skb_end_offset(skb));
+ skb_kfree_head(head);
}
}
@@ -2361,7 +2360,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
return 0;
nofrags:
- skb_kfree_head(data, size);
+ skb_kfree_head(data);
nodata:
return -ENOMEM;
}
@@ -2407,20 +2406,6 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
if (likely(skb_end_offset(skb) == saved_end_offset))
return 0;
- /* We can not change skb->end if the original or new value
- * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
- */
- if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
- skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
- /* We think this path should not be taken.
- * Add a temporary trace to warn us just in case.
- */
- pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
- saved_end_offset, skb_end_offset(skb));
- WARN_ON_ONCE(1);
- return 0;
- }
-
shinfo = skb_shinfo(skb);
/* We are about to change back skb->end,
@@ -6815,7 +6800,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_cloned(skb)) {
/* drop the old head gracefully */
if (skb_orphan_frags(skb, gfp_mask)) {
- skb_kfree_head(data, size);
+ skb_kfree_head(data);
return -ENOMEM;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -6922,7 +6907,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
if (skb_orphan_frags(skb, gfp_mask)) {
- skb_kfree_head(data, size);
+ skb_kfree_head(data);
return -ENOMEM;
}
shinfo = (struct skb_shared_info *)(data + size);
@@ -6958,7 +6943,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
if (skb_has_frag_list(skb))
kfree_skb_list(skb_shinfo(skb)->frag_list);
- skb_kfree_head(data, size);
+ skb_kfree_head(data);
return -ENOMEM;
}
skb_release_data(skb, SKB_CONSUMED);