diff options
| author | Eric Dumazet <edumazet@google.com> | 2026-01-09 23:38:36 +0300 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-01-13 06:30:57 +0300 |
| commit | 0391ab577c6e2ed7a0d8cb3e7a1da58497b6ff4f (patch) | |
| tree | 7bd42716776da17a5cac9457e0098a2166f0bed2 | |
| parent | d7161b07904797563023cc48822c86d3c41abf2c (diff) | |
| download | linux-0391ab577c6e2ed7a0d8cb3e7a1da58497b6ff4f.tar.xz | |
net: add skbuff_clear() helper
clang is unable to inline the memset() calls in net/core/skbuff.c
when initializing an allocated sk_buff.
memset(skb, 0, offsetof(struct sk_buff, tail));
This is unfortunate, because:
1) calling external memset_orig() helper adds a call/ret and
typical setup cost.
2) offsetof(struct sk_buff, tail) == 0xb8 = 0x80 + 0x38
On x86_64, memset_orig() performs two 64-byte clears,
then has to loop 7 times to clear the final 56 bytes.
skbuff_clear() makes sure the minimal and optimal code
is generated.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260109203836.1667441-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| -rw-r--r-- | net/core/skbuff.c | 32 |
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a56133902c0d..4887099e8678 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -307,6 +307,23 @@ static struct sk_buff *napi_skb_cache_get(bool alloc) return skb; } +/* + * Only clear those fields we need to clear, not those that we will + * actually initialise later. Hence, don't put any more fields after + * the tail pointer in struct sk_buff! + */ +static inline void skbuff_clear(struct sk_buff *skb) +{ + /* Replace memset(skb, 0, offsetof(struct sk_buff, tail)) + * with two smaller memset(), with a barrier() between them. + * This forces the compiler to inline both calls. + */ + BUILD_BUG_ON(offsetof(struct sk_buff, tail) <= 128); + memset(skb, 0, 128); + barrier(); + memset((void *)skb + 128, 0, offsetof(struct sk_buff, tail) - 128); +} + /** * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache * @skbs: pointer to an at least @n-sized array to fill with skb pointers @@ -357,7 +374,7 @@ get: skbs[i] = nc->skb_cache[base + i]; kasan_mempool_unpoison_object(skbs[i], skbuff_cache_size); - memset(skbs[i], 0, offsetof(struct sk_buff, tail)); + skbuff_clear(skbs[i]); } nc->skb_count -= n; @@ -424,7 +441,7 @@ struct sk_buff *slab_build_skb(void *data) if (unlikely(!skb)) return NULL; - memset(skb, 0, offsetof(struct sk_buff, tail)); + skbuff_clear(skb); data = __slab_build_skb(data, &size); __finalize_skb_around(skb, data, size); @@ -476,7 +493,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size) if (unlikely(!skb)) return NULL; - memset(skb, 0, offsetof(struct sk_buff, tail)); + skbuff_clear(skb); __build_skb_around(skb, data, frag_size); return skb; @@ -537,7 +554,7 @@ static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size) if (unlikely(!skb)) return NULL; - memset(skb, 0, offsetof(struct sk_buff, tail)); + skbuff_clear(skb); __build_skb_around(skb, data, frag_size); return skb; @@ -696,12 +713,7 @@ fallback: */ prefetchw(data + 
SKB_WITH_OVERHEAD(size)); - /* - * Only clear those fields we need to clear, not those that we will - * actually initialise below. Hence, don't put any more fields after - * the tail pointer in struct sk_buff! - */ - memset(skb, 0, offsetof(struct sk_buff, tail)); + skbuff_clear(skb); __build_skb_around(skb, data, size); skb->pfmemalloc = pfmemalloc; |
