author     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>  2018-09-26 16:54:31 +0300
committer  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>  2018-09-26 16:54:31 +0300
commit     aaccf3c97418f169afdbb5855e9cbcbda34e90fd (patch)
tree       5d4207e67958bdbc23288cf30178692f5534e1a0 /include/linux/shrinker.h
parent     f39684524b391c5a7ed0ac44db4fec3357af1c5d (diff)
parent     6bf4ca7fbc85d80446ac01c0d1d77db4d91a6d84 (diff)
download   linux-aaccf3c97418f169afdbb5855e9cbcbda34e90fd.tar.xz
Merge tag 'v4.19-rc5' of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into fbdev-for-next
Sync with upstream (which now contains fbdev-v4.19 changes) to prepare a base for fbdev-v4.20 changes.
Diffstat (limited to 'include/linux/shrinker.h')
-rw-r--r--  include/linux/shrinker.h | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6794490f25b2..9443cafd1969 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -12,6 +12,9 @@
 struct shrink_control {
         gfp_t gfp_mask;
 
+        /* current node being shrunk (for NUMA aware shrinkers) */
+        int nid;
+
         /*
          * How many objects scan_objects should scan and try to reclaim.
          * This is reset before every call, so it is safe for callees
@@ -26,20 +29,20 @@ struct shrink_control {
          */
         unsigned long nr_scanned;
 
-        /* current node being shrunk (for NUMA aware shrinkers) */
-        int nid;
-
         /* current memcg being shrunk (for memcg aware shrinkers) */
         struct mem_cgroup *memcg;
 };
 
 #define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
  * @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned in cases of the number of freeable items cannot be determined
+ * or shrinker should skip this cache for this time (e.g., their number
+ * is below shrinkable limit). No deadlock checks should be done during the
  * count callback - the shrinker relies on aggregating scan counts that couldn't
  * be executed due to potential deadlocks to be run at a later call when the
  * deadlock condition is no longer pending.
@@ -60,12 +63,16 @@ struct shrinker {
         unsigned long (*scan_objects)(struct shrinker *,
                                       struct shrink_control *sc);
 
-        int seeks;      /* seeks to recreate an obj */
         long batch;     /* reclaim batch size, 0 = default */
-        unsigned long flags;
+        int seeks;      /* seeks to recreate an obj */
+        unsigned flags;
 
         /* These are for internal use */
         struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+        /* ID in shrinker_idr */
+        int id;
+#endif
         /* objs pending delete, per node */
         atomic_long_t *nr_deferred;
 };
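
For reference, the updated @count_objects contract distinguishes an empty cache (return SHRINK_EMPTY) from a count that cannot be determined or that the shrinker wants to skip this time (return 0). Below is a minimal, hypothetical sketch of a NUMA-aware shrinker written against the v4.19-rc5 API pulled in by this merge; the demo_* names and the per-node counter are illustrative only and are not part of this merge.

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/shrinker.h>

/* Hypothetical per-node object counts for an example cache. */
static atomic_long_t demo_nr_objects[MAX_NUMNODES];

static unsigned long demo_count_objects(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        long nr = atomic_long_read(&demo_nr_objects[sc->nid]);

        /* Nothing cached on this node: report the cache as empty. */
        if (nr <= 0)
                return SHRINK_EMPTY;

        /* Returning 0 here instead would mean "skip this cache for now". */
        return nr;
}

static unsigned long demo_scan_objects(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        unsigned long freed = 0;

        /*
         * Free up to sc->nr_to_scan objects belonging to node sc->nid and
         * count them in 'freed'; return SHRINK_STOP if no progress can be
         * made (e.g. required locks cannot be taken under sc->gfp_mask).
         */
        return freed;
}

static struct shrinker demo_shrinker = {
        .count_objects  = demo_count_objects,
        .scan_objects   = demo_scan_objects,
        .seeks          = DEFAULT_SEEKS,
        .flags          = SHRINKER_NUMA_AWARE,
};

static int __init demo_shrinker_init(void)
{
        return register_shrinker(&demo_shrinker);
}

static void __exit demo_shrinker_exit(void)
{
        unregister_shrinker(&demo_shrinker);
}

module_init(demo_shrinker_init);
module_exit(demo_shrinker_exit);
MODULE_LICENSE("GPL");

The list, nr_deferred and new memcg id fields are internal state filled in by the registration path, so a shrinker implementation only supplies the callbacks, seeks, batch and flags as above.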