author     Dave Chinner <dchinner@redhat.com>    2013-08-28 04:18:16 +0400
committer  Al Viro <viro@zeniv.linux.org.uk>     2013-09-11 02:56:32 +0400
commit     a0b02131c5fcd8545b867db72224b3659e813f10
tree       3ba5156965ca4625cd5a4ad78405180143eaf15c
parent     70534a739c12b908789e27b08512d2615ba40f2f
shrinker: Kill old ->shrink API.
There are no more users of this API, so kill it dead, dead, dead and
quietly bury the corpse in a shallow, unmarked grave in a dark forest
deep in the hills...

[glommer@openvz.org: added flowers to the grave]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
 include/linux/shrinker.h      | 15
 include/trace/events/vmscan.h |  4
 mm/vmscan.c                   | 41
 3 files changed, 15 insertions(+), 45 deletions(-)
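
With ->shrink gone, the count_objects/scan_objects pair is the only shrinker
interface left. For reference, here is a minimal sketch of what a shrinker
registration looks like under the remaining API; it is not part of this patch,
every "demo_*" identifier is invented, and it assumes register_shrinker()
returns int, as it does after the NUMA-aware shrinker changes earlier in this
series:

#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long demo_cached;	/* objects the cache could free right now */

/* Report how many objects are freeable; 0 means "nothing to do". */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return demo_cached;
}

/* Free up to sc->nr_to_scan objects and return how many were freed. */
static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = sc->nr_to_scan;

	if (freed > demo_cached)
		freed = demo_cached;
	demo_cached -= freed;	/* a real cache would free objects here */
	return freed;		/* or SHRINK_STOP if it cannot make progress */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	/* no .shrink initializer: the field no longer exists after this patch */
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Splitting counting from scanning lets vmscan decide for itself how much work
to ask each shrinker to do per batch, which is what the mm/vmscan.c hunks
below rely on.
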
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 8f80f243fed9..68c097077ef0 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -7,14 +7,15 @@
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
*/
struct shrink_control {
gfp_t gfp_mask;
- /* How many slab objects shrinker() should scan and try to reclaim */
+ /*
+ * How many objects scan_objects should scan and try to reclaim.
+ * This is reset before every call, so it is safe for callees
+ * to modify.
+ */
unsigned long nr_to_scan;
/* shrink from these nodes */
@@ -27,11 +28,6 @@ struct shrink_control {
/*
* A callback you can register to apply pressure to ageable caches.
*
- * @shrink() should look through the least-recently-used 'nr_to_scan' entries
- * and attempt to free them up. It should return the number of objects which
- * remain in the cache. If it returns -1, it means it cannot do any scanning at
- * this time (eg. there is a risk of deadlock).
- *
* @count_objects should return the number of freeable items in the cache. If
* there are no objects to free or the number of freeable items cannot be
* determined, it should return 0. No deadlock checks should be done during the
@@ -50,7 +46,6 @@ struct shrink_control {
* @flags determine the shrinker abilities, like numa awareness
*/
struct shrinker {
- int (*shrink)(struct shrinker *, struct shrink_control *sc);
unsigned long (*count_objects)(struct shrinker *,
struct shrink_control *sc);
unsigned long (*scan_objects)(struct shrinker *,
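
The comment block above spells out the contract that replaces the old -1
return convention of ->shrink: count_objects returns the number of freeable
objects (or 0 if there is nothing to free or the count cannot be determined),
and scan_objects returns the number actually freed, or SHRINK_STOP when it
cannot make progress right now. A hedged sketch of a scan_objects callback
honouring that contract (all "example_*" identifiers are invented, not taken
from this patch) might look like:

#include <linux/shrinker.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/gfp.h>

static LIST_HEAD(example_lru);		/* new entries are added at the head */
static DEFINE_SPINLOCK(example_lock);

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/*
	 * Reclaiming these objects needs filesystem work; bail out if
	 * the allocation context forbids it.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* Avoid deadlock if reclaim recurses into this cache. */
	if (!spin_trylock(&example_lock))
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan && !list_empty(&example_lru)) {
		struct list_head *oldest = example_lru.prev;

		list_del_init(oldest);
		/* a real cache would free the object containing 'oldest' */
		freed++;
	}
	spin_unlock(&example_lock);

	return freed;
}

As the new shrink_control comment notes, vmscan resets sc->nr_to_scan before
every call (see the mm/vmscan.c loop below), so the callback is free to
modify it.
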
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 63cfcccaebb3..132a985aba8b 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -202,7 +202,7 @@ TRACE_EVENT(mm_shrink_slab_start,
TP_fast_assign(
__entry->shr = shr;
- __entry->shrink = shr->shrink;
+ __entry->shrink = shr->scan_objects;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = sc->gfp_mask;
__entry->pgs_scanned = pgs_scanned;
@@ -241,7 +241,7 @@ TRACE_EVENT(mm_shrink_slab_end,
TP_fast_assign(
__entry->shr = shr;
- __entry->shrink = shr->shrink;
+ __entry->shrink = shr->scan_objects;
__entry->unused_scan = unused_scan_cnt;
__entry->new_scan = new_scan_cnt;
__entry->retval = shrinker_retval;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 799ebceeb4f7..e36454220614 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -194,14 +194,6 @@ void unregister_shrinker(struct shrinker *shrinker)
}
EXPORT_SYMBOL(unregister_shrinker);
-static inline int do_shrinker_shrink(struct shrinker *shrinker,
- struct shrink_control *sc,
- unsigned long nr_to_scan)
-{
- sc->nr_to_scan = nr_to_scan;
- return (*shrinker->shrink)(shrinker, sc);
-}
-
#define SHRINK_BATCH 128
static unsigned long
@@ -218,10 +210,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
- if (shrinker->count_objects)
- max_pass = shrinker->count_objects(shrinker, shrinkctl);
- else
- max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
+ max_pass = shrinker->count_objects(shrinker, shrinkctl);
if (max_pass == 0)
return 0;
@@ -240,7 +229,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
if (total_scan < 0) {
printk(KERN_ERR
"shrink_slab: %pF negative objects to delete nr=%ld\n",
- shrinker->shrink, total_scan);
+ shrinker->scan_objects, total_scan);
total_scan = max_pass;
}
@@ -272,27 +261,13 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
max_pass, delta, total_scan);
while (total_scan >= batch_size) {
+ unsigned long ret;
- if (shrinker->scan_objects) {
- unsigned long ret;
- shrinkctl->nr_to_scan = batch_size;
- ret = shrinker->scan_objects(shrinker, shrinkctl);
-
- if (ret == SHRINK_STOP)
- break;
- freed += ret;
- } else {
- int nr_before;
- long ret;
-
- nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
- ret = do_shrinker_shrink(shrinker, shrinkctl,
- batch_size);
- if (ret == -1)
- break;
- if (ret < nr_before)
- freed += nr_before - ret;
- }
+ shrinkctl->nr_to_scan = batch_size;
+ ret = shrinker->scan_objects(shrinker, shrinkctl);
+ if (ret == SHRINK_STOP)
+ break;
+ freed += ret;
count_vm_events(SLABS_SCANNED, batch_size);
total_scan -= batch_size;