author      Ingo Molnar <mingo@elte.hu>   2008-07-06 16:23:39 +0400
committer   Ingo Molnar <mingo@elte.hu>   2008-07-06 16:23:39 +0400
commit      68083e05d72d94f347293d8cc0067050ba904bfa (patch)
tree        842e71365bd90866be7add181661a4039d891564 /mm/slub.c
parent      7baac8b91f9871ba8cb09af84de4ae1d86d07812 (diff)
parent      b7279469d66b55119784b8b9529c99c1955fe747 (diff)
download    linux-68083e05d72d94f347293d8cc0067050ba904bfa.tar.xz
Merge commit 'v2.6.26-rc9' into cpus4096
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a505a828ef41..1a427c0ae83b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -2726,9 +2726,10 @@ size_t ksize(const void *object)
 
 	page = virt_to_head_page(object);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
-
+	}
 	s = page->slab;
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -2994,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3025,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
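
The ksize() hunk covers allocations that never went through the slab: a kmalloc() request too large for any kmalloc cache is handed straight to the page allocator as a compound page, so its usable size is the whole span of that page. The added WARN_ON() fires when ksize() sees a page that is neither a slab page nor a compound page, i.e. a pointer that cannot have come from kmalloc(). A minimal user-space sketch of the size arithmetic on the non-slab path (the PAGE_SIZE value and the helper name are assumptions for illustration, not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed; matches most configs */

	/* For a page-allocator-backed allocation of a given compound
	 * order, the usable size is the full compound page. */
	static unsigned long ksize_large(unsigned int compound_order)
	{
		return PAGE_SIZE << compound_order;
	}

	int main(void)
	{
		unsigned int order;

		for (order = 0; order <= 3; order++)
			printf("order %u -> ksize %lu\n",
			       order, ksize_large(order));
		return 0;
	}

So an order-2 compound page reports 16384 usable bytes regardless of the size originally requested, which is what callers of ksize() are allowed to rely on.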
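
The two kmem_cache_init() hunks handle configurations where the minimum kmalloc alignment is 128 bytes (KMALLOC_MIN_SIZE == 128): the first folds the creation of the 192 byte cache into the preceding conditional block, and the second redirects requests of 136..192 bytes to the 256 byte cache, because, as the in-diff comment says, the 192 byte cache is not used with 128 byte alignment. Index 8 names the 256 byte cache since the power-of-two caches sit at kmalloc_caches[n] for size 2^n. A user-space sketch of the size_index redirect; the table size, the KMALLOC_SHIFT_LOW value, and the lookup in main() are illustrative assumptions:

	#include <stdio.h>

	#define KMALLOC_MIN_SIZE	128
	#define KMALLOC_SHIFT_LOW	7	/* 2^7 == 128, assumed */

	/* Maps a small request size to a cache index via (size - 1) / 8;
	 * 24 entries cover requests up to 192 bytes. */
	static unsigned char size_index[24];

	static void init_size_index(void)
	{
		int i;

		/* Requests below the minimum size all use the smallest cache. */
		for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
			size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

		/* The patch: with 128 byte alignment there is no 192 byte
		 * cache, so 136..192 byte requests take the 256 byte cache. */
		if (KMALLOC_MIN_SIZE == 128)
			for (i = 128 + 8; i <= 192; i += 8)
				size_index[(i - 1) / 8] = 8;
	}

	int main(void)
	{
		init_size_index();
		printf("kmalloc(160) -> cache index %d (256 byte cache)\n",
		       size_index[(160 - 1) / 8]);
		return 0;
	}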