author      Eric Dumazet <edumazet@google.com>        2023-02-06 20:31:00 +0300
committer   Jakub Kicinski <kuba@kernel.org>          2023-02-07 21:59:48 +0300
commit      115f1a5c42bdad9a9ea356fc0b4a39ec7537947f
tree        3940b5e63f565d6c5430d370a427d7f82031cd68
parent      61d731e6538dc44abf2dca6e77098ec6e85f7cc2
net: add SKB_HEAD_ALIGN() helper
We have many places using this expression:
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
Using SKB_HEAD_ALIGN() will let us clean them up.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--   include/linux/skbuff.h |  8 ++++++++
-rw-r--r--   net/core/skbuff.c      | 18 ++++++------------
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1fa95b916342..c3df3b55da97 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,6 +255,14 @@
 #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define SKB_MAX_ORDER(X, ORDER) \
 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
 #define SKB_MAX_HEAD(X)	(SKB_MAX_ORDER((X), 0))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 624e9e4ec116..4abfc3ba6898 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -558,8 +558,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
	osize = kmalloc_size_roundup(size);
	data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
@@ -632,8 +631,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
		goto skb_success;
	}

-	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	len = SKB_DATA_ALIGN(len);
+	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;
@@ -732,8 +730,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
	} else {
-		len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		len = SKB_DATA_ALIGN(len);
+		len = SKB_HEAD_ALIGN(len);

		data = page_frag_alloc(&nc->page, len, gfp_mask);
		pfmemalloc = nc->page.pfmemalloc;
@@ -1938,8 +1935,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
@@ -6289,8 +6285,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
@@ -6408,8 +6403,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
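
For illustration, here is a minimal standalone sketch (not kernel code) of the arithmetic SKB_HEAD_ALIGN() performs. The 64-byte cache-line size and the SHARED_INFO_SIZE stand-in for sizeof(struct skb_shared_info) are assumptions chosen only for this example; the real values depend on architecture and kernel configuration, and ALIGN_UP below is a simplified userspace stand-in for the kernel's ALIGN() macro.

/*
 * Standalone sketch of the SKB_HEAD_ALIGN() arithmetic.
 * Assumed values for illustration only:
 *   - SMP_CACHE_BYTES = 64 (cache-line size)
 *   - SHARED_INFO_SIZE = 320 (stand-in for sizeof(struct skb_shared_info))
 */
#include <stdio.h>
#include <stddef.h>

#define SMP_CACHE_BYTES   64   /* assumed cache-line size */
#define SHARED_INFO_SIZE  320  /* hypothetical shared-info size */

/* Round X up to the next multiple of A (A must be a power of two). */
#define ALIGN_UP(X, A)    (((X) + (A) - 1) & ~((size_t)(A) - 1))
#define SKB_DATA_ALIGN(X) ALIGN_UP((X), SMP_CACHE_BYTES)

/* Minimal allocation for X bytes of skb->head plus an aligned shared info. */
#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + SKB_DATA_ALIGN(SHARED_INFO_SIZE))

int main(void)
{
	size_t sizes[] = { 128, 1000, 1500 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t want = sizes[i];
		/* Old pattern, spelled out at every call site. */
		size_t old_way = SKB_DATA_ALIGN(want) +
				 SKB_DATA_ALIGN(SHARED_INFO_SIZE);
		/* New helper, identical result. */
		size_t new_way = SKB_HEAD_ALIGN(want);

		printf("head %4zu bytes -> allocate %4zu (old) / %4zu (new)\n",
		       want, old_way, new_way);
	}
	return 0;
}

Both the old two-step pattern and the new helper compute the same allocation size; the helper only centralizes the expression so the callers in the diff above collapse to a single line.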