From 7dc19d5affd71370754a2c3d36b485810eaee7a1 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Wed, 28 Aug 2013 10:18:11 +1000
Subject: drivers: convert shrinkers to new count/scan API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Convert the driver shrinkers to the new API.  Most changes are compile
tested only because I either don't have the hardware or it's staging
stuff.

FWIW, the md and android code is pretty good, but the rest of it makes me
want to claw my eyes out.  The amount of broken code I just encountered is
mind boggling.  I've added comments explaining what is broken, but I fear
that some of the code would be best dealt with by being dragged behind the
bike shed, buried in mud up to its neck and then run over repeatedly
with a blunt lawn mower.

Special mention goes to the zcache/zcache2 drivers.  They can't co-exist
in the build at the same time, they are under different menu options in
menuconfig, they only show up when you've got the right set of mm
subsystem options configured and so even compile testing is an exercise
in pulling teeth.  And that doesn't even take into account the horrible,
broken code...

[glommer@openvz.org: fixes for i915, android lowmem, zcache, bcache]
Signed-off-by: Dave Chinner
Signed-off-by: Glauber Costa
Acked-by: Mel Gorman
Cc: Daniel Vetter
Cc: Kent Overstreet
Cc: John Stultz
Cc: David Rientjes
Cc: Jerome Glisse
Cc: Thomas Hellstrom
Cc: "Theodore Ts'o"
Cc: Adrian Hunter
Cc: Al Viro
Cc: Artem Bityutskiy
Cc: Arve Hjønnevåg
Cc: Carlos Maiolino
Cc: Christoph Hellwig
Cc: Chuck Lever
Cc: Daniel Vetter
Cc: David Rientjes
Cc: Gleb Natapov
Cc: Greg Thelen
Cc: J. Bruce Fields
Cc: Jan Kara
Cc: Jerome Glisse
Cc: John Stultz
Cc: KAMEZAWA Hiroyuki
Cc: Kent Overstreet
Cc: Kirill A. Shutemov
Cc: Marcelo Tosatti
Cc: Mel Gorman
Cc: Steven Whitehouse
Cc: Thomas Hellstrom
Cc: Trond Myklebust
Signed-off-by: Andrew Morton
Signed-off-by: Al Viro
---
 drivers/md/bcache/btree.c | 43 ++++++++++++++++++-------------
 drivers/md/bcache/sysfs.c |  2 +-
 drivers/md/dm-bufio.c     | 64 +++++++++++++++++++++++++++++------------------
 3 files changed, 66 insertions(+), 43 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee372884c405..f9764e61978b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
 	return 0;
 }
 
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
 	struct btree *b, *t;
 	unsigned long i, nr = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	if (c->shrinker_disabled)
-		return 0;
+		return SHRINK_STOP;
 
 	if (c->try_harder)
-		return 0;
-
-	/*
-	 * If nr == 0, we're supposed to return the number of items we have
-	 * cached. Not allowed to return -1.
-	 */
-	if (!nr)
-		return mca_can_free(c) * c->btree_pages;
+		return SHRINK_STOP;
 
 	/* Return -1 if we can't do anything right now */
 	if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 	i = 0;
 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
-		if (!nr)
+		if (freed >= nr)
 			break;
 
 		if (++i > 3 &&
 		    !mca_reap(b, NULL, 0)) {
 			mca_data_free(b);
 			rw_unlock(true, b);
-			--nr;
+			freed++;
 		}
 	}
 
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	if (list_empty(&c->btree_cache))
 		goto out;
 
-	for (i = 0; nr && i < c->bucket_cache_used; i++) {
+	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
 		b = list_first_entry(&c->btree_cache, struct btree, list);
 		list_rotate_left(&c->btree_cache);
 
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
 			mca_bucket_free(b);
 			mca_data_free(b);
 			rw_unlock(true, b);
-			--nr;
+			freed++;
 		} else
 			b->accessed = 0;
 	}
 out:
-	nr = mca_can_free(c) * c->btree_pages;
 	mutex_unlock(&c->bucket_lock);
-	return nr;
+	return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+	if (c->shrinker_disabled)
+		return 0;
+
+	if (c->try_harder)
+		return 0;
+
+	return mca_can_free(c) * c->btree_pages;
 }
 
 void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
 	c->verify_data = NULL;
 #endif
 
-	c->shrink.shrink = bch_mca_shrink;
+	c->shrink.count_objects = bch_mca_count;
+	c->shrink.scan_objects = bch_mca_scan;
 	c->shrink.seeks = 4;
 	c->shrink.batch = c->btree_pages * 2;
 	register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c2846f99..4fe6ab2fbe2e 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
 		struct shrink_control sc;
 		sc.gfp_mask = GFP_KERNEL;
 		sc.nr_to_scan = strtoul_or_return(buf);
-		c->shrink.shrink(&c->shrink, &sc);
+		c->shrink.scan_objects(&c->shrink, &sc);
 	}
 
 	sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e079a6e3..173cbb20d104 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
 				unsigned long max_jiffies)
 {
 	if (jiffies - b->last_accessed < max_jiffies)
-		return 1;
+		return 0;
 
 	if (!(gfp & __GFP_IO)) {
 		if (test_bit(B_READING, &b->state) ||
 		    test_bit(B_WRITING, &b->state) ||
 		    test_bit(B_DIRTY, &b->state))
-			return 1;
+			return 0;
 	}
 
 	if (b->hold_count)
-		return 1;
+		return 0;
 
 	__make_buffer_clean(b);
 	__unlink_buffer(b);
 	__free_buffer_wake(b);
 
-	return 0;
+	return 1;
 }
 
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
-		   struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+		   gfp_t gfp_mask)
 {
 	int l;
 	struct dm_buffer *b, *tmp;
+	long freed = 0;
 
 	for (l = 0; l < LIST_SIZE; l++) {
-		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
-			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
-			    !--nr_to_scan)
-				return;
+		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+			freed += __cleanup_old_buffer(b, gfp_mask, 0);
+			if (!--nr_to_scan)
+				break;
+		}
 		dm_bufio_cond_resched();
 	}
+	return freed;
 }
 
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct dm_bufio_client *c =
-	    container_of(shrinker, struct dm_bufio_client, shrinker);
-	unsigned long r;
-	unsigned long nr_to_scan = sc->nr_to_scan;
+	struct dm_bufio_client *c;
+	unsigned long freed;
 
+	c = container_of(shrink, struct dm_bufio_client, shrinker);
 	if (sc->gfp_mask & __GFP_IO)
 		dm_bufio_lock(c);
 	else if (!dm_bufio_trylock(c))
-		return !nr_to_scan ? 0 : -1;
+		return SHRINK_STOP;
 
-	if (nr_to_scan)
-		__scan(c, nr_to_scan, sc);
+	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+	dm_bufio_unlock(c);
+	return freed;
+}
 
-	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
-	if (r > INT_MAX)
-		r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct dm_bufio_client *c;
+	unsigned long count;
 
-	dm_bufio_unlock(c);
+	c = container_of(shrink, struct dm_bufio_client, shrinker);
+	if (sc->gfp_mask & __GFP_IO)
+		dm_bufio_lock(c);
+	else if (!dm_bufio_trylock(c))
+		return 0;
 
-	return r;
+	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+	dm_bufio_unlock(c);
+	return count;
 }
 
 /*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 	__cache_size_refresh();
 	mutex_unlock(&dm_bufio_clients_lock);
 
-	c->shrinker.shrink = shrink;
+	c->shrinker.count_objects = dm_bufio_shrink_count;
+	c->shrinker.scan_objects = dm_bufio_shrink_scan;
 	c->shrinker.seeks = 1;
 	c->shrinker.batch = 0;
 	register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
 			struct dm_buffer *b;
 			b = list_entry(c->lru[LIST_CLEAN].prev,
 				       struct dm_buffer, lru_list);
-			if (__cleanup_old_buffer(b, 0, max_age * HZ))
+			if (!__cleanup_old_buffer(b, 0, max_age * HZ))
 				break;
 			dm_bufio_cond_resched();
 		}
-- 
cgit v1.2.3