author    Christoph Lameter <clameter@engr.sgi.com>    2006-03-22 11:09:06 +0300
committer Linus Torvalds <torvalds@g5.osdl.org>        2006-03-22 18:54:05 +0300
commit    aab2207cf8d9c343b6b5f0e4d27e1732f8618d14
tree      deb851a556ac7d2339a5bc83985a33fe126321ee /mm/slab.c
parent    35386e3b0f876bf194982f48f027af0c216499ce
[PATCH] slab: make drain_array more universal by adding more parameters
Add a parameter to drain_array() to control the freeing of all objects, then
use drain_array() to replace the remaining calls to drain_array_locked().
Doing so avoids taking the list lock in those places when the arrays are
empty.
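
The lock avoidance comes from drain_array() itself: it tests ac->avail before
acquiring the per-node list_lock, so an empty array never touches the lock.
For reference, the helper as it stands after this patch (taken from the diff
below):

	static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
				struct array_cache *ac, int force, int node)
	{
		/* Take the list_lock only when the array has objects to free. */
		if (ac && ac->avail) {
			spin_lock_irq(&l3->list_lock);
			drain_array_locked(searchp, ac, force, node);
			spin_unlock_irq(&l3->list_lock);
		}
	}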
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1845c0127394..d73b38e7d7e8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2126,6 +2126,10 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 static void drain_array_locked(struct kmem_cache *cachep,
 			struct array_cache *ac, int force, int node);
 
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+			struct array_cache *ac,
+			int force, int node);
+
 static void do_drain(void *arg)
 {
 	struct kmem_cache *cachep = arg;
@@ -2150,9 +2154,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3) {
-			spin_lock_irq(&l3->list_lock);
-			drain_array_locked(cachep, l3->shared, 1, node);
-			spin_unlock_irq(&l3->list_lock);
+			drain_array(cachep, l3, l3->shared, 1, node);
 			if (l3->alien)
 				drain_alien_cache(cachep, l3->alien);
 		}
@@ -3545,12 +3547,11 @@ static void drain_array_locked(struct kmem_cache *cachep,
  * necessary.
  */
 static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
-			struct array_cache *ac)
+			struct array_cache *ac, int force, int node)
 {
 	if (ac && ac->avail) {
 		spin_lock_irq(&l3->list_lock);
-		drain_array_locked(searchp, ac, 0,
-				   numa_node_id());
+		drain_array_locked(searchp, ac, force, node);
 		spin_unlock_irq(&l3->list_lock);
 	}
 }
@@ -3571,6 +3572,7 @@ static void cache_reap(void *unused)
 {
 	struct list_head *walk;
 	struct kmem_list3 *l3;
+	int node = numa_node_id();
 
 	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
@@ -3593,11 +3595,11 @@ static void cache_reap(void *unused)
 		 * have established with reasonable certainty that
 		 * we can do some work if the lock was obtained.
 		 */
-		l3 = searchp->nodelists[numa_node_id()];
+		l3 = searchp->nodelists[node];
 
 		reap_alien(searchp, l3);
 
-		drain_array(searchp, l3, cpu_cache_get(searchp));
+		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
 
 		/*
 		 * These are racy checks but it does not matter
@@ -3608,7 +3610,7 @@ static void cache_reap(void *unused)
 
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
 
-		drain_array(searchp, l3, l3->shared);
+		drain_array(searchp, l3, l3->shared, 0, node);
 
 		if (l3->free_touched) {
 			l3->free_touched = 0;
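
A note on the two kinds of call sites (an observation on the diff above, not
part of the original commit): drain_cpu_caches() passes force=1 so that every
cached object on the node is freed when the cache is drained, while the
periodic reaper cache_reap() passes force=0, in which case drain_array_locked()
as implemented in this version of mm/slab.c frees only part of the array and
skips arrays whose touched flag indicates recent use:

	/* Tear-down path in drain_cpu_caches(): drain everything on the node. */
	drain_array(cachep, l3, l3->shared, 1, node);

	/* Periodic reaping in cache_reap(): partial, activity-aware drain. */
	drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
	drain_array(searchp, l3, l3->shared, 0, node);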