author | Christoph Lameter <cl@linux.com> | 2010-08-20 21:37:15 +0400 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2010-10-02 11:24:27 +0400 |
commit | 51df1142816e469173889fb6d6dc810be9b9e022 | |
tree | e2827e87486675b514c68f06d67ac5980cd6ceb1 | /include/linux/slub_def.h |
parent | 6c182dc0de26ef97efb6a97a8deab074833764e7 | |
download | linux-51df1142816e469173889fb6d6dc810be9b9e022.tar.xz | |
slub: Dynamically size kmalloc cache allocations
kmalloc caches are statically defined and may take up a lot of space just
because the size of the node array has to be dimensioned for the largest
node count supported.
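To make the space cost concrete, here is a rough back-of-the-envelope model; the node count, cache count, and pointer size below are illustrative assumptions, not values taken from this patch or any particular kernel config:

```c
#include <stdio.h>

/* Illustrative values only; real kernels configure these differently. */
#define MAX_NUMNODES		1024	/* largest node count supported */
#define NR_KMALLOC_CACHES	24	/* rough count of statically defined kmalloc caches */

int main(void)
{
	/* Each statically defined kmem_cache embeds node[MAX_NUMNODES] pointers. */
	size_t per_cache = MAX_NUMNODES * sizeof(void *);
	size_t total = (size_t)NR_KMALLOC_CACHES * per_cache;

	printf("node array per cache: %zu bytes\n", per_cache);	/* 8192 with 8-byte pointers */
	printf("across %d caches: %zu bytes\n",
	       NR_KMALLOC_CACHES, total);			/* 196608 bytes, ~192 KiB */
	return 0;
}
```

On a single-node machine almost all of that per-node pointer space is dead weight, which is what dynamically sizing the allocation for the nodes actually present avoids.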
This patch makes the size of the kmem_cache structure dynamic throughout by
creating a kmem_cache slab cache for the kmem_cache objects. The bootstrap
occurs by allocating the initial one or two kmem_cache objects from the
page allocator.
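A condensed sketch of that bootstrap sequence, assuming a 64-bit NUMA build; this is not the patch's actual mm/slub.c code, and both bootstrap_cache_open() and kmalloc_bootstrap_sketch() are hypothetical stand-ins for SLUB's internal cache-setup path:

```c
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static struct kmem_cache *kmem_cache;		/* cache of struct kmem_cache */
static struct kmem_cache *kmem_cache_node;	/* cache of struct kmem_cache_node */

/* Hypothetical stand-in for SLUB's internal cache setup. */
void bootstrap_cache_open(struct kmem_cache *s, const char *name, size_t size);

void __init kmalloc_bootstrap_sketch(void)
{
	/* Size the structure for the nodes actually present, not MAX_NUMNODES. */
	size_t kmem_size = offsetof(struct kmem_cache, node) +
			   nr_node_ids * sizeof(struct kmem_cache_node *);
	size_t aligned = ALIGN(kmem_size, cache_line_size());
	int order = get_order(2 * aligned);

	/*
	 * Step 1: no slab caches exist yet, so carve the first two
	 * kmem_cache objects straight out of the page allocator.
	 */
	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
	kmem_cache_node = (void *)kmem_cache + aligned;

	/*
	 * Step 2: turn those raw objects into working caches; every later
	 * kmem_cache, including all the kmalloc caches, is then allocated
	 * from kmem_cache itself with kmem_cache_alloc().
	 */
	bootstrap_cache_open(kmem_cache_node, "kmem_cache_node",
			     sizeof(struct kmem_cache_node));
	bootstrap_cache_open(kmem_cache, "kmem_cache", kmem_size);
}
```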
C2->C3
- Fix various issues indicated by David
- Make create_kmalloc_cache() return a kmem_cache * pointer.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r-- | include/linux/slub_def.h | 7 |
1 file changed, 2 insertions, 5 deletions
```diff
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9f63538928c0..a6c43ec6a4a5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -139,19 +139,16 @@ struct kmem_cache {
 
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +213,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
```
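With kmalloc_caches[] now an array of pointers, kmalloc_slab() just returns the stored pointer (the one-character change in the second hunk), and boot-time setup fills each slot with whatever the creation helper hands back. A minimal sketch of that population step, where create_kmalloc_cache()'s exact signature, the cache name, and the starting index are assumptions matching the changelog rather than the patch's mm/slub.c code:

```c
#include <linux/init.h>
#include <linux/slab.h>	/* pulls in slub_def.h under CONFIG_SLUB */

struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Hypothetical helper matching the changelog item above: it returns the
 * cache it created instead of initializing a slot in a static array.
 */
struct kmem_cache *create_kmalloc_cache(const char *name, int size,
					unsigned int flags);

void __init populate_kmalloc_caches(void)
{
	int i;

	/* The starting index 3 (8-byte objects) is illustrative only. */
	for (i = 3; i < SLUB_PAGE_SHIFT; i++)
		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
}
```

Callers of kmalloc_slab() are otherwise unaffected: the indirection simply moves from &kmalloc_caches[index] to kmalloc_caches[index].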