author    Christoph Hellwig <hch@lst.de>    2006-12-07 07:32:36 +0300
committer Linus Torvalds <torvalds@woody.osdl.org>    2006-12-07 19:39:22 +0300
commit    b30973f877fea1a3fb84e05599890fcc082a88e5 (patch)
tree      198f7c31d5945288c1195348ac4e521ba90d81d6
parent    873481367edb18a7d0d7e5a285e6728c16bb44a9 (diff)
download  linux-b30973f877fea1a3fb84e05599890fcc082a88e5.tar.xz
[PATCH] node-aware skb allocation
Node-aware allocation of skbs for the receive path.

Details:

 - __alloc_skb gets a new node argument and calls the node-aware
   slab functions with it.
 - netdev_alloc_skb passes the node number it gets from dev_to_node
   to it; everyone else passes -1 (any node).

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--    include/linux/skbuff.h    6
-rw-r--r--    net/core/skbuff.c         12
2 files changed, 10 insertions, 8 deletions
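For orientation, a minimal caller-side sketch of the convention the commit message describes (not part of the patch; the example_* helpers are hypothetical): callers with no NUMA preference keep using alloc_skb(), which passes -1 ("any node") down, while node-aware paths can hand an explicit node id to __alloc_skb().

    #include <linux/skbuff.h>

    /* Hypothetical illustration of the two calling conventions after this
     * patch: -1 means "allocate on any node", a non-negative id requests
     * memory local to that NUMA node.
     */
    static struct sk_buff *example_any_node(unsigned int len)
    {
            /* expands to __alloc_skb(len, GFP_ATOMIC, 0, -1) */
            return alloc_skb(len, GFP_ATOMIC);
    }

    static struct sk_buff *example_on_node(unsigned int len, int nid)
    {
            /* explicit node id, e.g. taken from dev_to_node() */
            return __alloc_skb(len, GFP_ATOMIC, 0, nid);
    }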
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a05a5f7c0b73..1d649f3eb006 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -332,17 +332,17 @@ struct sk_buff {
 extern void kfree_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-                                   gfp_t priority, int fclone);
+                                   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                         gfp_t priority)
 {
-        return __alloc_skb(size, priority, 0);
+        return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                                gfp_t priority)
 {
-        return __alloc_skb(size, priority, 1);
+        return __alloc_skb(size, priority, 1, -1);
 }
 
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba9..7217fb8928f2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *      @gfp_mask: allocation mask
  *      @fclone: allocate from fclone cache instead of head cache
  *              and allocate a cloned (child) skb
+ *      @node: numa node to allocate memory on
  *
  *      Allocate a new &sk_buff. The returned buffer has no headroom and a
  *      tail room of size bytes. The object has a reference count of one.
@@ -141,7 +142,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *      %GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-                            int fclone)
+                            int fclone, int node)
 {
         kmem_cache_t *cache;
         struct skb_shared_info *shinfo;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
         /* Get the HEAD */
-        skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
         if (!skb)
                 goto out;
 
         /* Get the DATA. Size must match skb_add_mtu(). */
         size = SKB_DATA_ALIGN(size);
-        data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-                        gfp_mask);
+        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+                        gfp_mask, node);
         if (!data)
                 goto nodata;
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                 unsigned int length, gfp_t gfp_mask)
 {
+        int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
         struct sk_buff *skb;
 
-        skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
         if (likely(skb)) {
                 skb_reserve(skb, NET_SKB_PAD);
                 skb->dev = dev;
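A hedged driver-side sketch (the refill helper below is hypothetical, not from this patch) of why existing drivers need no changes: any netdev_alloc_skb() caller already goes through the __netdev_alloc_skb() path above, so after this patch the skb head and data are allocated on the device's NUMA node whenever a parent struct device is known, and on any node otherwise.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical rx refill helper: unchanged driver code, but the skb
     * memory now comes from dev_to_node(dev->class_dev.dev) when that
     * device is present (otherwise node -1, i.e. any node).
     */
    static struct sk_buff *example_rx_refill(struct net_device *dev,
                                             unsigned int buf_len)
    {
            struct sk_buff *skb = netdev_alloc_skb(dev, buf_len);

            if (!skb)
                    return NULL;    /* out of memory, caller retries later */
            /* NET_SKB_PAD headroom is reserved and skb->dev is already set */
            return skb;
    }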