From b221385bc41d6789edde3d2fa0cb20d5045730eb Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 25 Sep 2006 23:31:02 -0700 Subject: [PATCH] mm/: make functions static This patch makes the following needlessly global functions static: - slab.c: kmem_find_general_cachep() - swap.c: __page_cache_release() - vmalloc.c: __vmalloc_node() Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 21ba06035700..5870bcbd33cf 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size, return csizep->cs_cachep; } -struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) +static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) { return __find_general_cachep(size, gfpflags); } -EXPORT_SYMBOL(kmem_find_general_cachep); static size_t slab_mgmt_size(size_t nr_objs, size_t align) { -- cgit v1.2.3 From 7ff6f08295d90ab20d25200ef485ebb45b1b8d71 Mon Sep 17 00:00:00 2001 From: Martin Peschke Date: Mon, 25 Sep 2006 23:31:21 -0700 Subject: [PATCH] CPU hotplug compatible alloc_percpu() This patch splits alloc_percpu() up into two phases. Likewise for free_percpu(). This allows clients to limit initial allocations to online cpu's, and to populate or depopulate per-cpu data at run time as needed: struct my_struct *obj; /* initial allocation for online cpu's */ obj = percpu_alloc(sizeof(struct my_struct), GFP_KERNEL); ... /* populate per-cpu data for cpu coming online */ ptr = percpu_populate(obj, sizeof(struct my_struct), GFP_KERNEL, cpu); ... /* access per-cpu object */ ptr = percpu_ptr(obj, smp_processor_id()); ... /* depopulate per-cpu data for cpu going offline */ percpu_depopulate(obj, cpu); ... /* final removal */ percpu_free(obj); Signed-off-by: Martin Peschke Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/percpu.h | 79 +++++++++++++++++------ mm/slab.c | 166 ++++++++++++++++++++++++++++++++----------------- 2 files changed, 169 insertions(+), 76 deletions(-) (limited to 'mm/slab.c') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index f926490a7d8b..3835a9642f13 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -1,9 +1,12 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H + #include /* For preempt_disable() */ #include /* For kmalloc() */ #include #include /* For memset() */ +#include + #include /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ @@ -27,39 +30,77 @@ struct percpu_data { void *ptrs[NR_CPUS]; }; +#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) /* - * Use this to get to a cpu's version of the per-cpu object allocated using - * alloc_percpu. Non-atomic access to the current CPU's version should + * Use this to get to a cpu's version of the per-cpu object dynamically + * allocated. Non-atomic access to the current CPU's version should * probably be combined with get_cpu()/put_cpu(). 
*/ -#define per_cpu_ptr(ptr, cpu) \ -({ \ - struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \ - (__typeof__(ptr))__p->ptrs[(cpu)]; \ +#define percpu_ptr(ptr, cpu) \ +({ \ + struct percpu_data *__p = __percpu_disguise(ptr); \ + (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *__alloc_percpu(size_t size); -extern void free_percpu(const void *); +extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); +extern void percpu_depopulate(void *__pdata, int cpu); +extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, + cpumask_t *mask); +extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); +extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); +extern void percpu_free(void *__pdata); #else /* CONFIG_SMP */ -#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) +#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) + +static inline void percpu_depopulate(void *__pdata, int cpu) +{ +} + +static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) +{ +} -static inline void *__alloc_percpu(size_t size) +static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, + int cpu) { - void *ret = kmalloc(size, GFP_KERNEL); - if (ret) - memset(ret, 0, size); - return ret; + return percpu_ptr(__pdata, cpu); } -static inline void free_percpu(const void *ptr) -{ - kfree(ptr); + +static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, + cpumask_t *mask) +{ + return 0; +} + +static inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) +{ + return kzalloc(size, gfp); +} + +static inline void percpu_free(void *__pdata) +{ + kfree(__pdata); } #endif /* CONFIG_SMP */ -/* Simple wrapper for the common case: zeros memory. */ -#define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type)))) +#define percpu_populate_mask(__pdata, size, gfp, mask) \ + __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) +#define percpu_depopulate_mask(__pdata, mask) \ + __percpu_depopulate_mask((__pdata), &(mask)) +#define percpu_alloc_mask(size, gfp, mask) \ + __percpu_alloc_mask((size), (gfp), &(mask)) + +#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) + +/* (legacy) interface for use without CPU hotplug handling */ + +#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ + cpu_possible_map) +#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) +#define free_percpu(ptr) percpu_free((ptr)) +#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) #endif /* __LINUX_PERCPU_H */ diff --git a/mm/slab.c b/mm/slab.c index 5870bcbd33cf..00584dbbec03 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3371,52 +3371,127 @@ EXPORT_SYMBOL(__kmalloc_track_caller); #ifdef CONFIG_SMP /** - * __alloc_percpu - allocate one copy of the object for every present - * cpu in the system, zeroing them. - * Objects should be dereferenced using the per_cpu_ptr macro only. + * percpu_depopulate - depopulate per-cpu data for given cpu + * @__pdata: per-cpu data to depopulate + * @cpu: depopulate per-cpu data for this cpu * - * @size: how many bytes of memory are required. + * Depopulating per-cpu data for a cpu going offline would be a typical + * use case. You need to register a cpu hotplug handler for that purpose. 
*/ -void *__alloc_percpu(size_t size) +void percpu_depopulate(void *__pdata, int cpu) { - int i; - struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); + struct percpu_data *pdata = __percpu_disguise(__pdata); + if (pdata->ptrs[cpu]) { + kfree(pdata->ptrs[cpu]); + pdata->ptrs[cpu] = NULL; + } +} +EXPORT_SYMBOL_GPL(percpu_depopulate); - if (!pdata) - return NULL; +/** + * percpu_depopulate_mask - depopulate per-cpu data for some cpu's + * @__pdata: per-cpu data to depopulate + * @mask: depopulate per-cpu data for cpu's selected through mask bits + */ +void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) +{ + int cpu; + for_each_cpu_mask(cpu, *mask) + percpu_depopulate(__pdata, cpu); +} +EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); - /* - * Cannot use for_each_online_cpu since a cpu may come online - * and we have no way of figuring out how to fix the array - * that we have allocated then.... - */ - for_each_possible_cpu(i) { - int node = cpu_to_node(i); +/** + * percpu_populate - populate per-cpu data for given cpu + * @__pdata: per-cpu data to populate further + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @cpu: populate per-data for this cpu + * + * Populating per-cpu data for a cpu coming online would be a typical + * use case. You need to register a cpu hotplug handler for that purpose. + * Per-cpu object is populated with zeroed buffer. + */ +void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) +{ + struct percpu_data *pdata = __percpu_disguise(__pdata); + int node = cpu_to_node(cpu); - if (node_online(node)) - pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node); - else - pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); + BUG_ON(pdata->ptrs[cpu]); + if (node_online(node)) { + /* FIXME: kzalloc_node(size, gfp, node) */ + pdata->ptrs[cpu] = kmalloc_node(size, gfp, node); + if (pdata->ptrs[cpu]) + memset(pdata->ptrs[cpu], 0, size); + } else + pdata->ptrs[cpu] = kzalloc(size, gfp); + return pdata->ptrs[cpu]; +} +EXPORT_SYMBOL_GPL(percpu_populate); - if (!pdata->ptrs[i]) - goto unwind_oom; - memset(pdata->ptrs[i], 0, size); - } +/** + * percpu_populate_mask - populate per-cpu data for more cpu's + * @__pdata: per-cpu data to populate further + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @mask: populate per-cpu data for cpu's selected through mask bits + * + * Per-cpu objects are populated with zeroed buffers. + */ +int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, + cpumask_t *mask) +{ + cpumask_t populated = CPU_MASK_NONE; + int cpu; + + for_each_cpu_mask(cpu, *mask) + if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { + __percpu_depopulate_mask(__pdata, &populated); + return -ENOMEM; + } else + cpu_set(cpu, populated); + return 0; +} +EXPORT_SYMBOL_GPL(__percpu_populate_mask); - /* Catch derefs w/o wrappers */ - return (void *)(~(unsigned long)pdata); +/** + * percpu_alloc_mask - initial setup of per-cpu data + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @mask: populate per-data for cpu's selected through mask bits + * + * Populating per-cpu data for all online cpu's would be a typical use case, + * which is simplified by the percpu_alloc() wrapper. + * Per-cpu objects are populated with zeroed buffers. 
+ */ +void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) +{ + void *pdata = kzalloc(sizeof(struct percpu_data), gfp); + void *__pdata = __percpu_disguise(pdata); -unwind_oom: - while (--i >= 0) { - if (!cpu_possible(i)) - continue; - kfree(pdata->ptrs[i]); - } + if (unlikely(!pdata)) + return NULL; + if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask))) + return __pdata; kfree(pdata); return NULL; } -EXPORT_SYMBOL(__alloc_percpu); -#endif +EXPORT_SYMBOL_GPL(__percpu_alloc_mask); + +/** + * percpu_free - final cleanup of per-cpu data + * @__pdata: object to clean up + * + * We simply clean up any per-cpu object left. No need for the client to + * track and specify through a bis mask which per-cpu objects are to free. + */ +void percpu_free(void *__pdata) +{ + __percpu_depopulate_mask(__pdata, &cpu_possible_map); + kfree(__percpu_disguise(__pdata)); +} +EXPORT_SYMBOL_GPL(percpu_free); +#endif /* CONFIG_SMP */ /** * kmem_cache_free - Deallocate an object @@ -3463,29 +3538,6 @@ void kfree(const void *objp) } EXPORT_SYMBOL(kfree); -#ifdef CONFIG_SMP -/** - * free_percpu - free previously allocated percpu memory - * @objp: pointer returned by alloc_percpu. - * - * Don't free memory not originally allocated by alloc_percpu() - * The complemented objp is to check for that. - */ -void free_percpu(const void *objp) -{ - int i; - struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp); - - /* - * We allocate for all cpus so we cannot use for online cpu here. - */ - for_each_possible_cpu(i) - kfree(p->ptrs[i]); - kfree(p); -} -EXPORT_SYMBOL(free_percpu); -#endif - unsigned int kmem_cache_size(struct kmem_cache *cachep) { return obj_size(cachep); -- cgit v1.2.3 From ca5f9703dffa012cc46166e6206c5a992910e041 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 25 Sep 2006 23:31:25 -0700 Subject: [PATCH] slab: respect architecture and caller mandated alignment As explained by Heiko, on s390 (32-bit) ARCH_KMALLOC_MINALIGN is set to eight because their common I/O layer allocates data structures that need to have an eight byte alignment. This does not work when CONFIG_SLAB_DEBUG is enabled because kmem_cache_create will override alignment to BYTES_PER_WORD which is four. So change kmem_cache_create to ensure cache alignment is always at minimum what the architecture or caller mandates even if slab debugging is enabled. Cc: Heiko Carstens Cc: Christoph Lameter Signed-off-by: Manfred Spraul Signed-off-by: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 00584dbbec03..d47d0e186973 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2096,6 +2096,15 @@ kmem_cache_create (const char *name, size_t size, size_t align, } else { ralign = BYTES_PER_WORD; } + + /* + * Redzoning and user store require word alignment. Note this will be + * overridden by architecture or caller mandated alignment if either + * is greater than BYTES_PER_WORD. + */ + if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) + ralign = BYTES_PER_WORD; + /* 2) arch mandated alignment: disables debug if necessary */ if (ralign < ARCH_SLAB_MINALIGN) { ralign = ARCH_SLAB_MINALIGN; @@ -2109,8 +2118,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } /* - * 4) Store it. Note that the debug code below can reduce - * the alignment to BYTES_PER_WORD. + * 4) Store it. 
*/ align = ralign; @@ -2122,20 +2130,19 @@ kmem_cache_create (const char *name, size_t size, size_t align, #if DEBUG cachep->obj_size = size; + /* + * Both debugging options require word-alignment which is calculated + * into align above. + */ if (flags & SLAB_RED_ZONE) { - /* redzoning only works with word aligned caches */ - align = BYTES_PER_WORD; - /* add space for red zone words */ cachep->obj_offset += BYTES_PER_WORD; size += 2 * BYTES_PER_WORD; } if (flags & SLAB_STORE_USER) { - /* user store requires word alignment and - * one word storage behind the end of the real - * object. + /* user store requires one word storage behind the end of + * the real object. */ - align = BYTES_PER_WORD; size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) -- cgit v1.2.3 From e5ac9c5aec7c4bc57fa93f2d37d760a22cb7bd33 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Mon, 25 Sep 2006 23:31:34 -0700 Subject: [PATCH] Add some comments to slab.c Also, checks if we get a valid slabp_cache for off slab slab-descriptors. We should always get this. If we don't, then in that case we, will have to disable off-slab descriptors for this cache and do the calculations again. This is a rare case, so add a BUG_ON, for now, just in case. Signed-off-by: Alok N Kataria Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Cc: Pekka Enberg Cc: Manfred Spraul Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index d47d0e186973..3ad2f64998fd 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2206,8 +2206,17 @@ kmem_cache_create (const char *name, size_t size, size_t align, cachep->gfpflags |= GFP_DMA; cachep->buffer_size = size; - if (flags & CFLGS_OFF_SLAB) + if (flags & CFLGS_OFF_SLAB) { cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); + /* + * This is a possibility for one of the malloc_sizes caches. + * But since we go off slab only for object size greater than + * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, + * this should not happen at all. + * But leave a BUG_ON for some lucky dude. + */ + BUG_ON(!cachep->slabp_cache); + } cachep->ctor = ctor; cachep->dtor = dtor; cachep->name = name; @@ -2441,7 +2450,17 @@ int kmem_cache_destroy(struct kmem_cache *cachep) } EXPORT_SYMBOL(kmem_cache_destroy); -/* Get the memory for a slab management obj. */ +/* + * Get the memory for a slab management obj. + * For a slab cache when the slab descriptor is off-slab, slab descriptors + * always come from malloc_sizes caches. The slab descriptor cannot + * come from the same cache which is getting created because, + * when we are searching for an appropriate cache for these + * descriptors in kmem_cache_create, we search through the malloc_sizes array. + * If we are creating a malloc_sizes cache here it would not be visible to + * kmem_find_general_cachep till the initialization is complete. + * Hence we cannot have slabp_cache same as the original cache. 
+ */ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, int colour_off, gfp_t local_flags, int nodeid) @@ -3125,6 +3144,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, if (slabp->inuse == 0) { if (l3->free_objects > l3->free_limit) { l3->free_objects -= cachep->num; + /* No need to drop any previously held + * lock here, even if we have a off-slab slab + * descriptor it is guaranteed to come from + * a different cache, refer to comments before + * alloc_slabmgmt. + */ slab_destroy(cachep, slabp); } else { list_add(&slabp->list, &l3->slabs_free); -- cgit v1.2.3 From dbe5e69d2d6e591996ea2b817b887d03b60bb143 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 25 Sep 2006 23:31:36 -0700 Subject: [PATCH] slab: optimize kmalloc_node the same way as kmalloc [akpm@osdl.org: export fix] Signed-off-by: Christoph Hellwig Acked-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 25 ++++++++++++++++++++++++- mm/slab.c | 4 ++-- 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'mm/slab.c') diff --git a/include/linux/slab.h b/include/linux/slab.h index 193c03c547ec..2f6bef6a98c9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -202,7 +202,30 @@ extern int slab_is_available(void); #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); -extern void *kmalloc_node(size_t size, gfp_t flags, int node); +extern void *__kmalloc_node(size_t size, gfp_t flags, int node); + +static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + if (__builtin_constant_p(size)) { + int i = 0; +#define CACHE(x) \ + if (size <= x) \ + goto found; \ + else \ + i++; +#include "kmalloc_sizes.h" +#undef CACHE + { + extern void __you_cannot_kmalloc_that_much(void); + __you_cannot_kmalloc_that_much(); + } +found: + return kmem_cache_alloc_node((flags & GFP_DMA) ? + malloc_sizes[i].cs_dmacachep : + malloc_sizes[i].cs_cachep, flags, node); + } + return __kmalloc_node(size, flags, node); +} #else static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) { diff --git a/mm/slab.c b/mm/slab.c index 3ad2f64998fd..5e59ce7a46c8 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3348,7 +3348,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) } EXPORT_SYMBOL(kmem_cache_alloc_node); -void *kmalloc_node(size_t size, gfp_t flags, int node) +void *__kmalloc_node(size_t size, gfp_t flags, int node) { struct kmem_cache *cachep; @@ -3357,7 +3357,7 @@ void *kmalloc_node(size_t size, gfp_t flags, int node) return NULL; return kmem_cache_alloc_node(cachep, flags, node); } -EXPORT_SYMBOL(kmalloc_node); +EXPORT_SYMBOL(__kmalloc_node); #endif /** -- cgit v1.2.3 From 117f6eb1d8b8deb6f19fc88fc15bdb413c2a0c79 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:37 -0700 Subject: [PATCH] slab: extract __kmem_cache_destroy from kmem_cache_destroy The ability to free memory allocated to a slab cache is also useful if an error occurs during setup of a slab. So extract the function. 
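For example, a later patch in this series calls the extracted helper from the
kmem_cache_create() error path once setting up the cpu caches can fail:

	if (setup_cpu_cache(cachep)) {
		__kmem_cache_destroy(cachep);
		cachep = NULL;
		goto oops;
	}
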
Signed-off-by: Christoph Lameter Cc: Pekka Enberg Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 5e59ce7a46c8..c714741b253b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1833,6 +1833,27 @@ static void set_up_list3s(struct kmem_cache *cachep, int index) } } +static void __kmem_cache_destroy(struct kmem_cache *cachep) +{ + int i; + struct kmem_list3 *l3; + + for_each_online_cpu(i) + kfree(cachep->array[i]); + + /* NUMA: free the list3 structures */ + for_each_online_node(i) { + l3 = cachep->nodelists[i]; + if (l3) { + kfree(l3->shared); + free_alien_cache(l3->alien); + kfree(l3); + } + } + kmem_cache_free(&cache_cache, cachep); +} + + /** * calculate_slab_order - calculate size (page order) of slabs * @cachep: pointer to the cache that is being created @@ -2404,9 +2425,6 @@ EXPORT_SYMBOL(kmem_cache_shrink); */ int kmem_cache_destroy(struct kmem_cache *cachep) { - int i; - struct kmem_list3 *l3; - BUG_ON(!cachep || in_interrupt()); /* Don't let CPUs to come and go */ @@ -2432,19 +2450,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep) if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) synchronize_rcu(); - for_each_online_cpu(i) - kfree(cachep->array[i]); - - /* NUMA: free the list3 structures */ - for_each_online_node(i) { - l3 = cachep->nodelists[i]; - if (l3) { - kfree(l3->shared); - free_alien_cache(l3->alien); - kfree(l3); - } - } - kmem_cache_free(&cache_cache, cachep); + __kmem_cache_destroy(cachep); unlock_cpu_hotplug(); return 0; } -- cgit v1.2.3 From 2ed3a4ef95ef1a13a424378c34ebd9b7e593f212 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:38 -0700 Subject: [PATCH] slab: do not panic when alloc_kmemlist fails and slab is up It is fairly easy to get a system to oops by simply sizing a cache via /proc in such a way that one of the chaches (shared is easiest) becomes bigger than the maximum allowed slab allocation size. This occurs because enable_cpucache() fails if it cannot reallocate some caches. However, enable_cpucache() is used for multiple purposes: resizing caches, cache creation and bootstrap. If the slab is already up then we already have working caches. The resize can fail without a problem. We just need to return the proper error code. F.e. after this patch: # echo "size-64 10000 50 1000" >/proc/slabinfo -bash: echo: write error: Cannot allocate memory notice no OOPS. If we are doing a kmem_cache_create() then we also should not panic but return -ENOMEM. If on the other hand we do not have a fully bootstrapped slab allocator yet then we should indeed panic since we are unable to bring up the slab to its full functionality. 
Signed-off-by: Christoph Lameter Cc: Pekka Enberg Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index c714741b253b..3233c4c7cbce 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, struct kmem_list3 *l3, int tofree); static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node); -static void enable_cpucache(struct kmem_cache *cachep); +static int enable_cpucache(struct kmem_cache *cachep); static void cache_reap(void *unused); /* @@ -1490,7 +1490,8 @@ void __init kmem_cache_init(void) struct kmem_cache *cachep; mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) - enable_cpucache(cachep); + if (enable_cpucache(cachep)) + BUG(); mutex_unlock(&cache_chain_mutex); } @@ -1924,12 +1925,11 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, return left_over; } -static void setup_cpu_cache(struct kmem_cache *cachep) +static int setup_cpu_cache(struct kmem_cache *cachep) { - if (g_cpucache_up == FULL) { - enable_cpucache(cachep); - return; - } + if (g_cpucache_up == FULL) + return enable_cpucache(cachep); + if (g_cpucache_up == NONE) { /* * Note: the first kmem_cache_create must create the cache @@ -1976,6 +1976,7 @@ static void setup_cpu_cache(struct kmem_cache *cachep) cpu_cache_get(cachep)->touched = 0; cachep->batchcount = 1; cachep->limit = BOOT_CPUCACHE_ENTRIES; + return 0; } /** @@ -2242,8 +2243,11 @@ kmem_cache_create (const char *name, size_t size, size_t align, cachep->dtor = dtor; cachep->name = name; - - setup_cpu_cache(cachep); + if (setup_cpu_cache(cachep)) { + __kmem_cache_destroy(cachep); + cachep = NULL; + goto oops; + } /* cache setup completed, link it into the list */ list_add(&cachep->next, &cache_chain); @@ -3693,7 +3697,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared) { struct ccupdate_struct new; - int i, err; + int i; memset(&new.new, 0, sizeof(new.new)); for_each_online_cpu(i) { @@ -3724,17 +3728,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, kfree(ccold); } - err = alloc_kmemlist(cachep); - if (err) { - printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", - cachep->name, -err); - BUG(); - } - return 0; + return alloc_kmemlist(cachep); } /* Called with cache_chain_mutex held always */ -static void enable_cpucache(struct kmem_cache *cachep) +static int enable_cpucache(struct kmem_cache *cachep) { int err; int limit, shared; @@ -3786,6 +3784,7 @@ static void enable_cpucache(struct kmem_cache *cachep) if (err) printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", cachep->name, -err); + return err; } /* -- cgit v1.2.3 From 056c62418cc639bf2fe962c6a6ee56054b838bc7 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Mon, 25 Sep 2006 23:31:38 -0700 Subject: [PATCH] slab: fix lockdep warnings Place the alien array cache locks of on slab malloc slab caches on a seperate lockdep class. 
This avoids false positives from lockdep [akpm@osdl.org: build fix] Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Cc: Thomas Gleixner Acked-by: Arjan van de Ven Cc: Ingo Molnar Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 57 +++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 14 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 3233c4c7cbce..2b37a62f6314 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = { #endif }; +#define BAD_ALIEN_MAGIC 0x01020304ul + #ifdef CONFIG_LOCKDEP /* @@ -682,29 +684,53 @@ static struct kmem_cache cache_cache = { * The locking for this is tricky in that it nests within the locks * of all other slabs in a few places; to deal with this special * locking we put on-slab caches into a separate lock-class. + * + * We set lock class for alien array caches which are up during init. + * The lock annotation will be lost if all cpus of a node goes down and + * then comes back up during hotplug */ -static struct lock_class_key on_slab_key; +static struct lock_class_key on_slab_l3_key; +static struct lock_class_key on_slab_alc_key; + +static inline void init_lock_keys(void) -static inline void init_lock_keys(struct cache_sizes *s) { int q; - - for (q = 0; q < MAX_NUMNODES; q++) { - if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep)) - continue; - lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock, - &on_slab_key); + struct cache_sizes *s = malloc_sizes; + + while (s->cs_size != ULONG_MAX) { + for_each_node(q) { + struct array_cache **alc; + int r; + struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; + if (!l3 || OFF_SLAB(s->cs_cachep)) + continue; + lockdep_set_class(&l3->list_lock, &on_slab_l3_key); + alc = l3->alien; + /* + * FIXME: This check for BAD_ALIEN_MAGIC + * should go away when common slab code is taught to + * work even without alien caches. + * Currently, non NUMA code returns BAD_ALIEN_MAGIC + * for alloc_alien_cache, + */ + if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) + continue; + for_each_node(r) { + if (alc[r]) + lockdep_set_class(&alc[r]->lock, + &on_slab_alc_key); + } + } + s++; } } - #else -static inline void init_lock_keys(struct cache_sizes *s) +static inline void init_lock_keys(void) { } #endif - - /* Guard access to the cache-chain. */ static DEFINE_MUTEX(cache_chain_mutex); static struct list_head cache_chain; @@ -1091,7 +1117,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) static inline struct array_cache **alloc_alien_cache(int node, int limit) { - return (struct array_cache **) 0x01020304ul; + return (struct array_cache **)BAD_ALIEN_MAGIC; } static inline void free_alien_cache(struct array_cache **ac_ptr) @@ -1421,7 +1447,6 @@ void __init kmem_cache_init(void) ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL, NULL); } - init_lock_keys(sizes); sizes->cs_dmacachep = kmem_cache_create(names->name_dma, sizes->cs_size, @@ -1495,6 +1520,10 @@ void __init kmem_cache_init(void) mutex_unlock(&cache_chain_mutex); } + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); + + /* Done! */ g_cpucache_up = FULL; -- cgit v1.2.3 From d2e7b7d0aa021847c59f882b066e7d3812902870 Mon Sep 17 00:00:00 2001 From: "Siddha, Suresh B" Date: Mon, 25 Sep 2006 23:31:47 -0700 Subject: [PATCH] fix potential stack overflow in mm/slab.c On High end systems (1024 or so cpus) this can potentially cause stack overflow. 
Fix the stack usage. Signed-off-by: Suresh Siddha Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 2b37a62f6314..619337a5cb2b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3725,22 +3725,26 @@ static void do_ccupdate_local(void *info) static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared) { - struct ccupdate_struct new; + struct ccupdate_struct *new; int i; - memset(&new.new, 0, sizeof(new.new)); + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + for_each_online_cpu(i) { - new.new[i] = alloc_arraycache(cpu_to_node(i), limit, + new->new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); - if (!new.new[i]) { + if (!new->new[i]) { for (i--; i >= 0; i--) - kfree(new.new[i]); + kfree(new->new[i]); + kfree(new); return -ENOMEM; } } - new.cachep = cachep; + new->cachep = cachep; - on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1); + on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); check_irq_on(); cachep->batchcount = batchcount; @@ -3748,7 +3752,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, cachep->shared = shared; for_each_online_cpu(i) { - struct array_cache *ccold = new.new[i]; + struct array_cache *ccold = new->new[i]; if (!ccold) continue; spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); @@ -3756,7 +3760,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); kfree(ccold); } - + kfree(new); return alloc_kmemlist(cachep); } @@ -4274,6 +4278,7 @@ static int leaks_show(struct seq_file *m, void *p) show_symbol(m, n[2*i+2]); seq_putc(m, '\n'); } + return 0; } -- cgit v1.2.3 From d00bcc98d7ec2c87391c9d9e1cca519ef64d33ef Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:50 -0700 Subject: [PATCH] Extract the allocpercpu functions from the slab allocator The allocpercpu functions __alloc_percpu and __free_percpu() are heavily using the slab allocator. However, they are conceptually slab. This also simplifies SLOB (at this point slob may be broken in mm. This should fix it). 
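As the comments on percpu_populate()/percpu_depopulate() note, clients are
expected to drive population from a CPU hotplug notifier. A minimal sketch of
such a handler -- hypothetical client code, not part of this patch; my_stats,
my_obj_size and my_cpu_callback are made-up names -- could look like:

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/percpu.h>

	static void *my_stats;		/* returned by percpu_alloc() */
	static size_t my_obj_size;	/* size of the per-cpu object */

	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* give the incoming cpu its zeroed copy */
			if (!percpu_populate(my_stats, my_obj_size,
					     GFP_KERNEL, cpu))
				return NOTIFY_BAD;
			break;
		case CPU_UP_CANCELED:
		case CPU_DEAD:
			/* drop the copy of a cpu that went away */
			percpu_depopulate(my_stats, cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier = {
		.notifier_call = my_cpu_callback,
	};

The notifier would be registered with register_cpu_notifier() after the
initial percpu_alloc() call.
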
Signed-off-by: Christoph Lameter Cc: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Makefile | 2 +- mm/allocpercpu.c | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ mm/slab.c | 124 ---------------------------------------------------- mm/slob.c | 45 ------------------- 4 files changed, 130 insertions(+), 170 deletions(-) create mode 100644 mm/allocpercpu.c (limited to 'mm/slab.c') diff --git a/mm/Makefile b/mm/Makefile index 9dd824c11eeb..60c56c0b5e10 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -23,4 +23,4 @@ obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o - +obj-$(CONFIG_SMP) += allocpercpu.o diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c new file mode 100644 index 000000000000..eaa9abeea536 --- /dev/null +++ b/mm/allocpercpu.c @@ -0,0 +1,129 @@ +/* + * linux/mm/allocpercpu.c + * + * Separated from slab.c August 11, 2006 Christoph Lameter + */ +#include +#include + +/** + * percpu_depopulate - depopulate per-cpu data for given cpu + * @__pdata: per-cpu data to depopulate + * @cpu: depopulate per-cpu data for this cpu + * + * Depopulating per-cpu data for a cpu going offline would be a typical + * use case. You need to register a cpu hotplug handler for that purpose. + */ +void percpu_depopulate(void *__pdata, int cpu) +{ + struct percpu_data *pdata = __percpu_disguise(__pdata); + if (pdata->ptrs[cpu]) { + kfree(pdata->ptrs[cpu]); + pdata->ptrs[cpu] = NULL; + } +} +EXPORT_SYMBOL_GPL(percpu_depopulate); + +/** + * percpu_depopulate_mask - depopulate per-cpu data for some cpu's + * @__pdata: per-cpu data to depopulate + * @mask: depopulate per-cpu data for cpu's selected through mask bits + */ +void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) +{ + int cpu; + for_each_cpu_mask(cpu, *mask) + percpu_depopulate(__pdata, cpu); +} +EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); + +/** + * percpu_populate - populate per-cpu data for given cpu + * @__pdata: per-cpu data to populate further + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @cpu: populate per-data for this cpu + * + * Populating per-cpu data for a cpu coming online would be a typical + * use case. You need to register a cpu hotplug handler for that purpose. + * Per-cpu object is populated with zeroed buffer. + */ +void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) +{ + struct percpu_data *pdata = __percpu_disguise(__pdata); + int node = cpu_to_node(cpu); + + BUG_ON(pdata->ptrs[cpu]); + if (node_online(node)) { + /* FIXME: kzalloc_node(size, gfp, node) */ + pdata->ptrs[cpu] = kmalloc_node(size, gfp, node); + if (pdata->ptrs[cpu]) + memset(pdata->ptrs[cpu], 0, size); + } else + pdata->ptrs[cpu] = kzalloc(size, gfp); + return pdata->ptrs[cpu]; +} +EXPORT_SYMBOL_GPL(percpu_populate); + +/** + * percpu_populate_mask - populate per-cpu data for more cpu's + * @__pdata: per-cpu data to populate further + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @mask: populate per-cpu data for cpu's selected through mask bits + * + * Per-cpu objects are populated with zeroed buffers. 
+ */ +int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, + cpumask_t *mask) +{ + cpumask_t populated = CPU_MASK_NONE; + int cpu; + + for_each_cpu_mask(cpu, *mask) + if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { + __percpu_depopulate_mask(__pdata, &populated); + return -ENOMEM; + } else + cpu_set(cpu, populated); + return 0; +} +EXPORT_SYMBOL_GPL(__percpu_populate_mask); + +/** + * percpu_alloc_mask - initial setup of per-cpu data + * @size: size of per-cpu object + * @gfp: may sleep or not etc. + * @mask: populate per-data for cpu's selected through mask bits + * + * Populating per-cpu data for all online cpu's would be a typical use case, + * which is simplified by the percpu_alloc() wrapper. + * Per-cpu objects are populated with zeroed buffers. + */ +void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) +{ + void *pdata = kzalloc(sizeof(struct percpu_data), gfp); + void *__pdata = __percpu_disguise(pdata); + + if (unlikely(!pdata)) + return NULL; + if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask))) + return __pdata; + kfree(pdata); + return NULL; +} +EXPORT_SYMBOL_GPL(__percpu_alloc_mask); + +/** + * percpu_free - final cleanup of per-cpu data + * @__pdata: object to clean up + * + * We simply clean up any per-cpu object left. No need for the client to + * track and specify through a bis mask which per-cpu objects are to free. + */ +void percpu_free(void *__pdata) +{ + __percpu_depopulate_mask(__pdata, &cpu_possible_map); + kfree(__percpu_disguise(__pdata)); +} +EXPORT_SYMBOL_GPL(percpu_free); diff --git a/mm/slab.c b/mm/slab.c index 619337a5cb2b..13b5050f84cc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3440,130 +3440,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) EXPORT_SYMBOL(__kmalloc_track_caller); #endif -#ifdef CONFIG_SMP -/** - * percpu_depopulate - depopulate per-cpu data for given cpu - * @__pdata: per-cpu data to depopulate - * @cpu: depopulate per-cpu data for this cpu - * - * Depopulating per-cpu data for a cpu going offline would be a typical - * use case. You need to register a cpu hotplug handler for that purpose. - */ -void percpu_depopulate(void *__pdata, int cpu) -{ - struct percpu_data *pdata = __percpu_disguise(__pdata); - if (pdata->ptrs[cpu]) { - kfree(pdata->ptrs[cpu]); - pdata->ptrs[cpu] = NULL; - } -} -EXPORT_SYMBOL_GPL(percpu_depopulate); - -/** - * percpu_depopulate_mask - depopulate per-cpu data for some cpu's - * @__pdata: per-cpu data to depopulate - * @mask: depopulate per-cpu data for cpu's selected through mask bits - */ -void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) -{ - int cpu; - for_each_cpu_mask(cpu, *mask) - percpu_depopulate(__pdata, cpu); -} -EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); - -/** - * percpu_populate - populate per-cpu data for given cpu - * @__pdata: per-cpu data to populate further - * @size: size of per-cpu object - * @gfp: may sleep or not etc. - * @cpu: populate per-data for this cpu - * - * Populating per-cpu data for a cpu coming online would be a typical - * use case. You need to register a cpu hotplug handler for that purpose. - * Per-cpu object is populated with zeroed buffer. 
- */ -void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) -{ - struct percpu_data *pdata = __percpu_disguise(__pdata); - int node = cpu_to_node(cpu); - - BUG_ON(pdata->ptrs[cpu]); - if (node_online(node)) { - /* FIXME: kzalloc_node(size, gfp, node) */ - pdata->ptrs[cpu] = kmalloc_node(size, gfp, node); - if (pdata->ptrs[cpu]) - memset(pdata->ptrs[cpu], 0, size); - } else - pdata->ptrs[cpu] = kzalloc(size, gfp); - return pdata->ptrs[cpu]; -} -EXPORT_SYMBOL_GPL(percpu_populate); - -/** - * percpu_populate_mask - populate per-cpu data for more cpu's - * @__pdata: per-cpu data to populate further - * @size: size of per-cpu object - * @gfp: may sleep or not etc. - * @mask: populate per-cpu data for cpu's selected through mask bits - * - * Per-cpu objects are populated with zeroed buffers. - */ -int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask) -{ - cpumask_t populated = CPU_MASK_NONE; - int cpu; - - for_each_cpu_mask(cpu, *mask) - if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { - __percpu_depopulate_mask(__pdata, &populated); - return -ENOMEM; - } else - cpu_set(cpu, populated); - return 0; -} -EXPORT_SYMBOL_GPL(__percpu_populate_mask); - -/** - * percpu_alloc_mask - initial setup of per-cpu data - * @size: size of per-cpu object - * @gfp: may sleep or not etc. - * @mask: populate per-data for cpu's selected through mask bits - * - * Populating per-cpu data for all online cpu's would be a typical use case, - * which is simplified by the percpu_alloc() wrapper. - * Per-cpu objects are populated with zeroed buffers. - */ -void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) -{ - void *pdata = kzalloc(sizeof(struct percpu_data), gfp); - void *__pdata = __percpu_disguise(pdata); - - if (unlikely(!pdata)) - return NULL; - if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask))) - return __pdata; - kfree(pdata); - return NULL; -} -EXPORT_SYMBOL_GPL(__percpu_alloc_mask); - -/** - * percpu_free - final cleanup of per-cpu data - * @__pdata: object to clean up - * - * We simply clean up any per-cpu object left. No need for the client to - * track and specify through a bis mask which per-cpu objects are to free. - */ -void percpu_free(void *__pdata) -{ - __percpu_depopulate_mask(__pdata, &cpu_possible_map); - kfree(__percpu_disguise(__pdata)); -} -EXPORT_SYMBOL_GPL(percpu_free); -#endif /* CONFIG_SMP */ - /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. 
diff --git a/mm/slob.c b/mm/slob.c index 7b52b20b9607..4c28a421b270 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -343,48 +343,3 @@ void kmem_cache_init(void) atomic_t slab_reclaim_pages = ATOMIC_INIT(0); EXPORT_SYMBOL(slab_reclaim_pages); -#ifdef CONFIG_SMP - -void *__alloc_percpu(size_t size) -{ - int i; - struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); - - if (!pdata) - return NULL; - - for_each_possible_cpu(i) { - pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); - if (!pdata->ptrs[i]) - goto unwind_oom; - memset(pdata->ptrs[i], 0, size); - } - - /* Catch derefs w/o wrappers */ - return (void *) (~(unsigned long) pdata); - -unwind_oom: - while (--i >= 0) { - if (!cpu_possible(i)) - continue; - kfree(pdata->ptrs[i]); - } - kfree(pdata); - return NULL; -} -EXPORT_SYMBOL(__alloc_percpu); - -void -free_percpu(const void *objp) -{ - int i; - struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp); - - for_each_possible_cpu(i) - kfree(p->ptrs[i]); - - kfree(p); -} -EXPORT_SYMBOL(free_percpu); - -#endif -- cgit v1.2.3 From 972d1a7b140569084439a81265a0f15b74e924e0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:51 -0700 Subject: [PATCH] ZVC: Support NR_SLAB_RECLAIMABLE / NR_SLAB_UNRECLAIMABLE Remove the atomic counter for slab_reclaim_pages and replace the counter and NR_SLAB with two ZVC counter that account for unreclaimable and reclaimable slab pages: NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE. Change the check in vmscan.c to refer to to NR_SLAB_RECLAIMABLE. The intend seems to be to check for slab pages that could be freed. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/mm/pgtable.c | 4 +++- drivers/base/node.c | 9 +++++++-- fs/proc/proc_misc.c | 7 ++++++- include/linux/mmzone.h | 3 ++- include/linux/slab.h | 2 -- mm/mmap.c | 2 +- mm/nommu.c | 2 +- mm/page_alloc.c | 3 ++- mm/slab.c | 24 +++++++++++------------- mm/slob.c | 4 ---- mm/vmscan.c | 2 +- mm/vmstat.c | 3 ++- 12 files changed, 36 insertions(+), 29 deletions(-) (limited to 'mm/slab.c') diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c index bd98768d8764..a9f4910a22f8 100644 --- a/arch/i386/mm/pgtable.c +++ b/arch/i386/mm/pgtable.c @@ -60,7 +60,9 @@ void show_mem(void) printk(KERN_INFO "%lu pages writeback\n", global_page_state(NR_WRITEBACK)); printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); - printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB)); + printk(KERN_INFO "%lu pages slab\n", + global_page_state(NR_SLAB_RECLAIMABLE) + + global_page_state(NR_SLAB_UNRECLAIMABLE)); printk(KERN_INFO "%lu pages pagetables\n", global_page_state(NR_PAGETABLE)); } diff --git a/drivers/base/node.c b/drivers/base/node.c index e09f5c2c11ee..001e6f6b9c1b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -68,7 +68,9 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) "Node %d PageTables: %8lu kB\n" "Node %d NFS_Unstable: %8lu kB\n" "Node %d Bounce: %8lu kB\n" - "Node %d Slab: %8lu kB\n", + "Node %d Slab: %8lu kB\n" + "Node %d SReclaimable: %8lu kB\n" + "Node %d SUnreclaim: %8lu kB\n", nid, K(i.totalram), nid, K(i.freeram), nid, K(i.totalram - i.freeram), @@ -88,7 +90,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) nid, K(node_page_state(nid, NR_PAGETABLE)), nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), nid, K(node_page_state(nid, NR_BOUNCE)), - nid, K(node_page_state(nid, NR_SLAB))); + nid, K(node_page_state(nid, 
NR_SLAB_RECLAIMABLE) + + node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); n += hugetlb_report_node_meminfo(nid, buf + n); return n; } diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index caa0a51560a0..5bbd60896050 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -170,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, "AnonPages: %8lu kB\n" "Mapped: %8lu kB\n" "Slab: %8lu kB\n" + "SReclaimable: %8lu kB\n" + "SUnreclaim: %8lu kB\n" "PageTables: %8lu kB\n" "NFS_Unstable: %8lu kB\n" "Bounce: %8lu kB\n" @@ -197,7 +199,10 @@ static int meminfo_read_proc(char *page, char **start, off_t off, K(global_page_state(NR_WRITEBACK)), K(global_page_state(NR_ANON_PAGES)), K(global_page_state(NR_FILE_MAPPED)), - K(global_page_state(NR_SLAB)), + K(global_page_state(NR_SLAB_RECLAIMABLE) + + global_page_state(NR_SLAB_UNRECLAIMABLE)), + K(global_page_state(NR_SLAB_RECLAIMABLE)), + K(global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_PAGETABLE)), K(global_page_state(NR_UNSTABLE_NFS)), K(global_page_state(NR_BOUNCE)), diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a703527e2b45..08c41b9f92e0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -51,7 +51,8 @@ enum zone_stat_item { NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. only modified from process context */ NR_FILE_PAGES, - NR_SLAB, /* Pages used by slab allocator */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, /* used for pagetables */ NR_FILE_DIRTY, NR_WRITEBACK, diff --git a/include/linux/slab.h b/include/linux/slab.h index 2f6bef6a98c9..66d6eb78d1c6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -284,8 +284,6 @@ extern kmem_cache_t *fs_cachep; extern kmem_cache_t *sighand_cachep; extern kmem_cache_t *bio_cachep; -extern atomic_t slab_reclaim_pages; - #endif /* __KERNEL__ */ #endif /* _LINUX_SLAB_H */ diff --git a/mm/mmap.c b/mm/mmap.c index 8507ee9cd573..eea8eefd51a8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -116,7 +116,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) * which are reclaimable, under pressure. The dentry * cache and most inode caches should fall into this */ - free += atomic_read(&slab_reclaim_pages); + free += global_page_state(NR_SLAB_RECLAIMABLE); /* * Leave the last 3% for root diff --git a/mm/nommu.c b/mm/nommu.c index c576df71e3bb..d99dea31e443 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1133,7 +1133,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) * which are reclaimable, under pressure. 
The dentry * cache and most inode caches should fall into this */ - free += atomic_read(&slab_reclaim_pages); + free += global_page_state(NR_SLAB_RECLAIMABLE); /* * Leave the last 3% for root diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5da6bc4e0a6b..47e98423b30d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1304,7 +1304,8 @@ void show_free_areas(void) global_page_state(NR_WRITEBACK), global_page_state(NR_UNSTABLE_NFS), nr_free_pages(), - global_page_state(NR_SLAB), + global_page_state(NR_SLAB_RECLAIMABLE) + + global_page_state(NR_SLAB_UNRECLAIMABLE), global_page_state(NR_FILE_MAPPED), global_page_state(NR_PAGETABLE)); diff --git a/mm/slab.c b/mm/slab.c index 13b5050f84cc..7a48eb1a60c8 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -735,14 +735,6 @@ static inline void init_lock_keys(void) static DEFINE_MUTEX(cache_chain_mutex); static struct list_head cache_chain; -/* - * vm_enough_memory() looks at this to determine how many slab-allocated pages - * are possibly freeable under pressure - * - * SLAB_RECLAIM_ACCOUNT turns this on per-slab - */ -atomic_t slab_reclaim_pages; - /* * chicken and egg problem: delay the per-cpu array allocation * until the general caches are up. @@ -1580,8 +1572,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) nr_pages = (1 << cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_add(nr_pages, &slab_reclaim_pages); - add_zone_page_state(page_zone(page), NR_SLAB, nr_pages); + add_zone_page_state(page_zone(page), + NR_SLAB_RECLAIMABLE, nr_pages); + else + add_zone_page_state(page_zone(page), + NR_SLAB_UNRECLAIMABLE, nr_pages); for (i = 0; i < nr_pages; i++) __SetPageSlab(page + i); return page_address(page); @@ -1596,7 +1591,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) struct page *page = virt_to_page(addr); const unsigned long nr_freed = i; - sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed); + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + sub_zone_page_state(page_zone(page), + NR_SLAB_RECLAIMABLE, nr_freed); + else + sub_zone_page_state(page_zone(page), + NR_SLAB_UNRECLAIMABLE, nr_freed); while (i--) { BUG_ON(!PageSlab(page)); __ClearPageSlab(page); @@ -1605,8 +1605,6 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; free_pages((unsigned long)addr, cachep->gfporder); - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); } static void kmem_rcu_free(struct rcu_head *head) diff --git a/mm/slob.c b/mm/slob.c index 4c28a421b270..20188627347c 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -339,7 +339,3 @@ void kmem_cache_init(void) mod_timer(&slob_timer, jiffies + HZ); } - -atomic_t slab_reclaim_pages = ATOMIC_INIT(0); -EXPORT_SYMBOL(slab_reclaim_pages); - diff --git a/mm/vmscan.c b/mm/vmscan.c index 5154c25e8440..349797ba4bac 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1378,7 +1378,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) for_each_zone(zone) lru_pages += zone->nr_active + zone->nr_inactive; - nr_slab = global_page_state(NR_SLAB); + nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); /* If slab caches are huge, it's better to hit them first */ while (nr_slab >= lru_pages) { reclaim_state.reclaimed_slab = 0; diff --git a/mm/vmstat.c b/mm/vmstat.c index 968c0072e19a..490d8c1a0ded 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -458,7 +458,8 @@ static char *vmstat_text[] = { "nr_anon_pages", "nr_mapped", "nr_file_pages", 
- "nr_slab", + "nr_slab_reclaimable", + "nr_slab_unreclaimable", "nr_page_table_pages", "nr_dirty", "nr_writeback", -- cgit v1.2.3 From 133d205a18b7a4d8cb52959c5310f6664277cf61 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 27 Sep 2006 01:49:41 -0700 Subject: [PATCH] Make kmem_cache_destroy() return void un-, de-, -free, -destroy, -exit, etc functions should in general return void. Also, There is very little, say, filesystem driver code can do upon failed kmem_cache_destroy(). If it will be decided to BUG in this case, BUG should be put in generic code, instead. Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 4 ++-- mm/slab.c | 6 ++---- mm/slob.c | 3 +-- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'mm/slab.c') diff --git a/include/linux/slab.h b/include/linux/slab.h index 66d6eb78d1c6..a96fd9310d55 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -60,7 +60,7 @@ extern void __init kmem_cache_init(void); extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *, kmem_cache_t *, unsigned long), void (*)(void *, kmem_cache_t *, unsigned long)); -extern int kmem_cache_destroy(kmem_cache_t *); +extern void kmem_cache_destroy(kmem_cache_t *); extern int kmem_cache_shrink(kmem_cache_t *); extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t); extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); @@ -249,7 +249,7 @@ struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t, unsigned long, void (*)(void *, struct kmem_cache *, unsigned long), void (*)(void *, struct kmem_cache *, unsigned long)); -int kmem_cache_destroy(struct kmem_cache *c); +void kmem_cache_destroy(struct kmem_cache *c); void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags); void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); void kmem_cache_free(struct kmem_cache *c, void *b); diff --git a/mm/slab.c b/mm/slab.c index 7a48eb1a60c8..c52ebf9c4462 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2442,7 +2442,6 @@ EXPORT_SYMBOL(kmem_cache_shrink); * @cachep: the cache to destroy * * Remove a struct kmem_cache object from the slab cache. - * Returns 0 on success. * * It is expected this function will be called by a module when it is * unloaded. This will remove the cache completely, and avoid a duplicate @@ -2454,7 +2453,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); * The caller must guarantee that noone will allocate memory from the cache * during the kmem_cache_destroy(). 
*/ -int kmem_cache_destroy(struct kmem_cache *cachep) +void kmem_cache_destroy(struct kmem_cache *cachep) { BUG_ON(!cachep || in_interrupt()); @@ -2475,7 +2474,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep) list_add(&cachep->next, &cache_chain); mutex_unlock(&cache_chain_mutex); unlock_cpu_hotplug(); - return 1; + return; } if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) @@ -2483,7 +2482,6 @@ int kmem_cache_destroy(struct kmem_cache *cachep) __kmem_cache_destroy(cachep); unlock_cpu_hotplug(); - return 0; } EXPORT_SYMBOL(kmem_cache_destroy); diff --git a/mm/slob.c b/mm/slob.c index 20188627347c..542394184a58 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -270,10 +270,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, } EXPORT_SYMBOL(kmem_cache_create); -int kmem_cache_destroy(struct kmem_cache *c) +void kmem_cache_destroy(struct kmem_cache *c) { slob_free(c, sizeof(struct kmem_cache)); - return 0; } EXPORT_SYMBOL(kmem_cache_destroy); -- cgit v1.2.3 From de3083ec3e6bfb1ab60bc8a410f37702529f953c Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 27 Sep 2006 01:50:03 -0700 Subject: [PATCH] slab: fix kmalloc_node applying memory policies if nodeid == numa_node_id() kmalloc_node() falls back to ___cache_alloc() under certain conditions and at that point memory policies may be applied redirecting the allocation away from the current node. Therefore kmalloc_node(...,numa_node_id()) or kmalloc_node(...,-1) may not return memory from the local node. Fix this by doing the policy check in __cache_alloc() instead of ____cache_alloc(). This version here is a cleanup of Kiran's patch. - Tested on ia64. - Extra material removed. - Consolidate the exit path if alternate_node_alloc() returned an object. [akpm@osdl.org: warning fix] Signed-off-by: Alok N Kataria Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index c52ebf9c4462..69e11c45002f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3028,14 +3028,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) void *objp; struct array_cache *ac; -#ifdef CONFIG_NUMA - if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { - objp = alternate_node_alloc(cachep, flags); - if (objp != NULL) - return objp; - } -#endif - check_irq_off(); ac = cpu_cache_get(cachep); if (likely(ac->avail)) { @@ -3053,12 +3045,19 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) { unsigned long save_flags; - void *objp; + void *objp = NULL; cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); - objp = ____cache_alloc(cachep, flags); + +#ifdef CONFIG_NUMA + if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) + objp = alternate_node_alloc(cachep, flags); +#endif + + if (!objp) + objp = ____cache_alloc(cachep, flags); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); -- cgit v1.2.3 From 765c4507af71c39aba21006bbd3ec809fe9714ff Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 27 Sep 2006 01:50:08 -0700 Subject: [PATCH] GFP_THISNODE for the slab allocator This patch insures that the slab node lists in the NUMA case only contain slabs that belong to that specific node. 
All slab allocations use GFP_THISNODE when calling into the page allocator. If an allocation fails then we fall back in the slab allocator according to the zonelists appropriate for a certain context. This allows a replication of the behavior of alloc_pages and alloc_pages node in the slab layer. Currently allocations requested from the page allocator may be redirected via cpusets to other nodes. This results in remote pages on nodelists and that in turn results in interrupt latency issues during cache draining. Plus the slab is handing out memory as local when it is really remote. Fallback for slab memory allocations will occur within the slab allocator and not in the page allocator. This is necessary in order to be able to use the existing pools of objects on the nodes that we fall back to before adding more pages to a slab. The fallback function insures that the nodes we fall back to obey cpuset restrictions of the current context. We do not allocate objects from outside of the current cpuset context like before. Note that the implementation of locality constraints within the slab allocator requires importing logic from the page allocator. This is a mischmash that is not that great. Other allocators (uncached allocator, vmalloc, huge pages) face similar problems and have similar minimal reimplementations of the basic fallback logic of the page allocator. There is another way of implementing a slab by avoiding per node lists (see modular slab) but this wont work within the existing slab. V1->V2: - Use NUMA_BUILD to avoid #ifdef CONFIG_NUMA - Exploit GFP_THISNODE being 0 in the NON_NUMA case to avoid another #ifdef [akpm@osdl.org: build fix] Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 4 ++- mm/slab.c | 107 +++++++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 81 insertions(+), 30 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 38f89650bc84..cf18f0942553 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1136,7 +1136,9 @@ static unsigned interleave_nodes(struct mempolicy *policy) */ unsigned slab_node(struct mempolicy *policy) { - switch (policy->policy) { + int pol = policy ? 
policy->policy : MPOL_DEFAULT; + + switch (pol) { case MPOL_INTERLEAVE: return interleave_nodes(policy); diff --git a/mm/slab.c b/mm/slab.c index 69e11c45002f..792bfe320a8b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -972,7 +972,39 @@ static int transfer_objects(struct array_cache *to, return nr; } -#ifdef CONFIG_NUMA +#ifndef CONFIG_NUMA + +#define drain_alien_cache(cachep, alien) do { } while (0) +#define reap_alien(cachep, l3) do { } while (0) + +static inline struct array_cache **alloc_alien_cache(int node, int limit) +{ + return (struct array_cache **)BAD_ALIEN_MAGIC; +} + +static inline void free_alien_cache(struct array_cache **ac_ptr) +{ +} + +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + return 0; +} + +static inline void *alternate_node_alloc(struct kmem_cache *cachep, + gfp_t flags) +{ + return NULL; +} + +static inline void *__cache_alloc_node(struct kmem_cache *cachep, + gfp_t flags, int nodeid) +{ + return NULL; +} + +#else /* CONFIG_NUMA */ + static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); static void *alternate_node_alloc(struct kmem_cache *, gfp_t); @@ -1101,26 +1133,6 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) } return 1; } - -#else - -#define drain_alien_cache(cachep, alien) do { } while (0) -#define reap_alien(cachep, l3) do { } while (0) - -static inline struct array_cache **alloc_alien_cache(int node, int limit) -{ - return (struct array_cache **)BAD_ALIEN_MAGIC; -} - -static inline void free_alien_cache(struct array_cache **ac_ptr) -{ -} - -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) -{ - return 0; -} - #endif static int __cpuinit cpuup_callback(struct notifier_block *nfb, @@ -1564,7 +1576,13 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) */ flags |= __GFP_COMP; #endif - flags |= cachep->gfpflags; + + /* + * Under NUMA we want memory on the indicated node. We will handle + * the needed fallback ourselves since we want to serve from our + * per node object lists first for other nodes. + */ + flags |= cachep->gfpflags | GFP_THISNODE; page = alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) @@ -3051,13 +3069,18 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep, local_irq_save(save_flags); -#ifdef CONFIG_NUMA - if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) + if (unlikely(NUMA_BUILD && + current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) objp = alternate_node_alloc(cachep, flags); -#endif if (!objp) objp = ____cache_alloc(cachep, flags); + /* + * We may just have run out of memory on the local node. + * __cache_alloc_node() knows how to locate memory on other nodes + */ + if (NUMA_BUILD && !objp) + objp = __cache_alloc_node(cachep, flags, numa_node_id()); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); @@ -3076,7 +3099,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) { int nid_alloc, nid_here; - if (in_interrupt()) + if (in_interrupt() || (flags & __GFP_THISNODE)) return NULL; nid_alloc = nid_here = numa_node_id(); if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) @@ -3088,6 +3111,28 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) return NULL; } +/* + * Fallback function if there was no memory available and no objects on a + * certain node and we are allowed to fall back. We mimick the behavior of + * the page allocator. 
We fall back according to a zonelist determined by + * the policy layer while obeying cpuset constraints. + */ +void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) +{ + struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy)) + ->node_zonelists[gfp_zone(flags)]; + struct zone **z; + void *obj = NULL; + + for (z = zonelist->zones; *z && !obj; z++) + if (zone_idx(*z) <= ZONE_NORMAL && + cpuset_zone_allowed(*z, flags)) + obj = __cache_alloc_node(cache, + flags | __GFP_THISNODE, + zone_to_nid(*z)); + return obj; +} + /* * A interface to enable slab creation on nodeid */ @@ -3141,11 +3186,15 @@ retry: must_grow: spin_unlock(&l3->list_lock); x = cache_grow(cachep, flags, nodeid); + if (x) + goto retry; - if (!x) - return NULL; + if (!(flags & __GFP_THISNODE)) + /* Unable to grow the cache. Fall back to other nodes. */ + return fallback_alloc(cachep, flags); + + return NULL; - goto retry; done: return obj; } -- cgit v1.2.3