author | Christoph Lameter <cl@linux.com> | 2012-07-07 00:25:12 +0400
---|---|---
committer | Pekka Enberg <penberg@kernel.org> | 2012-07-09 13:13:41 +0400
commit | 18004c5d4084d965aa1396392706b8688306427a (patch) |
tree | ae480cb4514cbddf38ee43ec4513f59cfa42c3d7 /mm/slab.c |
parent | 97d06609158e61f6bdf538c4a6788e2de492236f (diff) |
download | linux-18004c5d4084d965aa1396392706b8688306427a.tar.xz |
mm, sl[aou]b: Use a common mutex definition
Use the mutex definition from SLAB and make it the common way to take a sleeping lock.
This has the effect of using a mutex instead of an rw semaphore for SLUB.
SLOB gains a mutex for kmem_cache_create() serialization.
This is not needed yet, but SLOB may gain more features later (such as
slabinfo / sysfs support) through the expansion of the common code, which
will need this.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
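
The diff shown on this page is limited to mm/slab.c, so the shared definition that the renamed identifiers refer to is not visible here. As a rough sketch only (the file placement and the exact form of the slab_caches declaration are assumptions based on the common-code series, not something this diff shows), the common declarations would look roughly like:

```c
/*
 * Sketch only -- not part of this diff, which is limited to mm/slab.c.
 * Placement in an internal header shared by SLAB, SLUB and SLOB (e.g.
 * mm/slab.h) and a single common definition site are assumptions; only
 * the identifiers slab_mutex and slab_caches appear in the hunks below.
 */
#include <linux/mutex.h>
#include <linux/list.h>

/* Shared declarations visible to all three allocators: */
extern struct mutex slab_mutex;		/* serializes cache create/destroy/shrink */
extern struct list_head slab_caches;	/* list of all kmem_cache instances */

/* A single common definition site would then provide: */
DEFINE_MUTEX(slab_mutex);
struct list_head slab_caches;		/* initialized by kmem_cache_init() */
```

The mm/slab.c hunks below then simply switch from the local cache_chain/cache_chain_mutex pair to these common names.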
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 108 |
1 file changed, 51 insertions(+), 57 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 59a466b85b0f..fd7dac67c26e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,7 +1094,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
@@ -1108,7 +1102,7 @@ static int init_cache_nodelists_node(int node)
 	struct kmem_list3 *l3;
 	const int memsize = sizeof(struct kmem_list3);
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
 		 * Set up the size64 kmemlist for cpu before we can
 		 * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)
 
 			/*
 			 * The l3s don't come and go as CPUs come and
-			 * go. cache_chain_mutex is sufficient
+			 * go. slab_mutex is sufficient
 			 * protection here.
 			 */
 			cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
@@ -1196,7 +1190,7 @@ free_array_cache:
 	 * the respective cache's slabs, now we can go ahead and
 	 * shrink each nodelist to its limit.
 	 */
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		l3 = cachep->nodelists[node];
 		if (!l3)
 			continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 	 * Now we can go ahead with allocating the shared arrays and
 	 * array caches
 	 */
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared = NULL;
 		struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		err = cpuup_prepare(cpu);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		/*
-		 * Shutdown cache reaper. Note that the cache_chain_mutex is
+		 * Shutdown cache reaper. Note that the slab_mutex is
 		 * held so that if cache_reap() is invoked it cannot do
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		cpuup_canceled(cpu);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	}
 	return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
 	struct kmem_cache *cachep;
 	int ret = 0;
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct kmem_list3 *l3;
 
 		l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 
 	switch (action) {
 	case MEM_GOING_ONLINE:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		ret = init_cache_nodelists_node(nid);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_GOING_OFFLINE:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		ret = drain_cache_nodelists_node(nid);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_ONLINE:
 	case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
 	node = numa_mem_id();
 
 	/* 1) create the cache_cache */
-	INIT_LIST_HEAD(&cache_chain);
-	list_add(&cache_cache.list, &cache_chain);
+	INIT_LIST_HEAD(&slab_caches);
+	list_add(&cache_cache.list, &slab_caches);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
 	init_lock_keys();
 
 	/* 6) resize the head arrays to their final sizes */
-	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, list)
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(cachep, &slab_caches, list)
 		if (enable_cpucache(cachep, GFP_NOWAIT))
 			BUG();
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 
 	/* Done! */
 	slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (slab_is_available()) {
 		get_online_cpus();
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 	}
 
-	list_for_each_entry(pc, &cache_chain, list) {
+	list_for_each_entry(pc, &slab_caches, list) {
 		char tmp;
 		int res;
 
@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &cache_chain);
+	list_add(&cachep->list, &slab_caches);
 oops:
 	if (slab_is_available()) {
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		put_online_cpus();
 	}
 	return cachep;
@@ -2622,7 +2616,7 @@ out:
 	return nr_freed;
 }
 
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	BUG_ON(!cachep || in_interrupt());
 
 	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	ret = __cache_shrink(cachep);
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 	return ret;
 }
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 
 	/* Find the cache in the chain of caches. */
 	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->list);
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &cache_chain);
-		mutex_unlock(&cache_chain_mutex);
+		list_add(&cachep->list, &slab_caches);
+		mutex_unlock(&slab_mutex);
 		put_online_cpus();
 		return;
 	}
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 		rcu_barrier();
 
 	__kmem_cache_destroy(cachep);
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
 	new->new[smp_processor_id()] = old;
 }
 
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	return alloc_kmemlist(cachep, gfp);
 }
 
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
 	int node = numa_mem_id();
 	struct delayed_work *work = to_delayed_work(w);
 
-	if (!mutex_trylock(&cache_chain_mutex))
+	if (!mutex_trylock(&slab_mutex))
 		/* Give up. Setup the next iteration. */
 		goto out;
 
-	list_for_each_entry(searchp, &cache_chain, list) {
+	list_for_each_entry(searchp, &slab_caches, list) {
 		check_irq_on();
 
 		/*
@@ -4205,7 +4199,7 @@ next:
 		cond_resched();
 	}
 	check_irq_on();
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	next_reap_node();
 out:
 	/* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	loff_t n = *pos;
 
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 
-	return seq_list_start(&cache_chain, *pos);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	return seq_list_next(p, &cache_chain, pos);
+	return seq_list_next(p, &slab_caches, pos);
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	res = -EINVAL;
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 || batchcount < 1 ||
 					batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 			break;
 		}
 	}
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-	mutex_lock(&cache_chain_mutex);
-	return seq_list_start(&cache_chain, *pos);
+	mutex_lock(&slab_mutex);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
 	name = cachep->name;
 	if (n[0] == n[1]) {
 		/* Increase the buffer size */
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
 		if (!m->private) {
 			/* Too bad, we are really out */
 			m->private = n;
-			mutex_lock(&cache_chain_mutex);
+			mutex_lock(&slab_mutex);
 			return -ENOMEM;
 		}
 		*(unsigned long *)m->private = n[0] * 2;
 		kfree(n);
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		/* Now make sure this entry will be retried */
 		m->count = m->size;
 		return 0;
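
Taken together, the hunks above keep the existing locking pattern under the new name: paths that can sleep (cache creation, shrink, destroy, /proc/slabinfo iteration) take slab_mutex, nested inside get_online_cpus()/put_online_cpus() where CPU hotplug matters, while the periodic reaper only trylocks so it backs off rather than stalling behind a create or destroy. A minimal sketch of that pattern, with hypothetical helper names, assuming the shared slab_mutex/slab_caches declarations and the struct kmem_cache list linkage that mm/slab.c already uses:

```c
#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical helper, not from the patch: walk all caches safely. */
static void walk_all_caches_example(void)
{
	struct kmem_cache *cachep;

	get_online_cpus();		/* keep CPUs from coming or going */
	mutex_lock(&slab_mutex);	/* keep slab_caches stable */
	list_for_each_entry(cachep, &slab_caches, list) {
		/* shrink, tune or inspect cachep here */
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}

/* Hypothetical helper mirroring cache_reap(): never block on the mutex. */
static void reap_example(void)
{
	if (!mutex_trylock(&slab_mutex))
		return;			/* give up, retry on the next timer tick */
	/* ... release a few per-cpu objects from each cache ... */
	mutex_unlock(&slab_mutex);
}
```

This mirrors what kmem_cache_shrink()/kmem_cache_destroy() and cache_reap() do in the hunks above; only the mutex and list names change, not the ordering against CPU hotplug.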