author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 19:38:01 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 19:38:01 +0300
commit     a1e8fad5900fa94adb500c6e0dfd60a307f7a3c9 (patch)
tree       c619277e587a99c90e76a1e7b63746af4d959d72 /include
parent     e3166331a3288dd7184548896a1c7ab682f0dbe8 (diff)
parent     a45b0616e7ee9db4c1b2b9a4a79a974325fa9bf3 (diff)
download   linux-a1e8fad5900fa94adb500c6e0dfd60a307f7a3c9.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: Fix a crash during slabinfo -v
tracing/slab: Move kmalloc tracepoint out of inline code
slub: Fix slub_lock down/up imbalance
slub: Fix build breakage in Documentation/vm
slub tracing: move trace calls out of always inlined functions to reduce kernel code size
slub: move slabinfo.c to tools/slub/slabinfo.c
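The common thread in the tracing commits above is code size: kmalloc() and friends are __always_inline, so while the trace_kmalloc()/trace_kmalloc_node() tracepoints lived inside them, every call site in the kernel carried an expanded copy of the trace logic. The fix routes the inline fast path through a single out-of-line *_trace() function that performs the allocation and fires the tracepoint in one place. Below is a minimal userspace sketch of that pattern, not kernel code: my_alloc and my_alloc_trace are hypothetical names, and printf() stands in for the tracepoint.

#include <stdio.h>
#include <stdlib.h>

/* Out-of-line traced allocator: the "tracepoint" (a printf stand-in for
 * trace_kmalloc()) is emitted once, inside this function body. */
void *my_alloc_trace(size_t size)
{
        void *ret = malloc(size);
        printf("alloc: ptr=%p size=%zu\n", ret, size);
        return ret;
}

/* Always-inlined fast-path wrapper: its body is now a single call, so the
 * many call sites no longer each inline a copy of the trace logic. */
static inline __attribute__((always_inline)) void *my_alloc(size_t size)
{
        return my_alloc_trace(size);
}

int main(void)
{
        char *p = my_alloc(64);
        free(p);
        return 0;
}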
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab_def.h  33
-rw-r--r--  include/linux/slub_def.h  55
2 files changed, 39 insertions(+), 49 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502f6906..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+                                    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
         return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ found:
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        ret = kmem_cache_alloc_notrace(cachep, flags);
-
-        trace_kmalloc(_THIS_IP_, ret,
-                      size, slab_buffer_size(cachep), flags);
+        ret = kmem_cache_alloc_trace(size, cachep, flags);
 
         return ret;
 }
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-                                           gfp_t flags,
-                                           int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+                                         struct kmem_cache *cachep,
+                                         gfp_t flags,
+                                         int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-                              gfp_t flags,
-                              int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+                            struct kmem_cache *cachep,
+                            gfp_t flags,
+                            int nodeid)
 {
         return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *cachep;
-        void *ret;
 
         if (__builtin_constant_p(size)) {
                 int i = 0;
@@ -234,13 +233,7 @@ found:
 #endif
         cachep = malloc_sizes[i].cs_cachep;
 
-        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-        trace_kmalloc_node(_THIS_IP_, ret,
-                           size, slab_buffer_size(cachep),
-                           flags, node);
-
-        return ret;
+        return kmem_cache_alloc_node_trace(size, cachep, flags, node);
         }
         return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed180b9b..8b6e8ae5d5ca 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
         ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+        kmemleak_alloc(ret, size, 1, flags);
+        return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
         return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+        return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
         unsigned int order = get_order(size);
-        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-        kmemleak_alloc(ret, size, 1, flags);
-        trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-        return ret;
+        return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-        void *ret;
-
         if (__builtin_constant_p(size)) {
                 if (size > SLUB_MAX_SIZE)
                         return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
                 if (!s)
                         return ZERO_SIZE_PTR;
 
-                ret = kmem_cache_alloc_notrace(s, flags);
-
-                trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-                return ret;
+                return kmem_cache_alloc_trace(s, flags, size);
         }
         return __kmalloc(size, flags);
 }
@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                            gfp_t gfpflags,
-                                           int node);
+                                           int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
                               gfp_t gfpflags,
-                              int node)
+                              int node, size_t size)
 {
         return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        void *ret;
-
         if (__builtin_constant_p(size) &&
                 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                         struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
                 if (!s)
                         return ZERO_SIZE_PTR;
 
-                ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-                trace_kmalloc_node(_THIS_IP_, ret,
-                                   size, s->size, flags, node);
-
-                return ret;
+                return kmem_cache_alloc_node_trace(s, flags, node, size);
         }
         return __kmalloc_node(size, flags, node);
 }
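Because this diffstat is limited to include/, the out-of-line halves of the new *_trace() helpers do not appear here; in the underlying commits they are defined in mm/slub.c (with SLAB counterparts in mm/slab.c). Sketched from the header declarations above, the SLUB side looks roughly like the following; treat it as an approximation rather than the verbatim commit, and note that slab_alloc() is SLUB's internal fast-path helper, assumed here with its signature of this era.

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

        /* The tracepoint fires here, out of line, using the caller's
         * requested size handed down by the inline wrapper. */
        trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
        return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);

void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = kmalloc_order(size, flags, order);

        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

The trade-off is deliberate: with CONFIG_TRACING enabled, the fast path gains one function call, but every inlined kmalloc() call site shrinks, a net win for kernel text size; with tracing disabled, the inline *_trace() stubs collapse back to direct kmem_cache_alloc() calls and cost nothing.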