-rw-r--r--	mm/workingset.c	44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/mm/workingset.c b/mm/workingset.c
index ef556bf1323d..241fa5d6b3b2 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -369,40 +369,46 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 {
 	unsigned long max_nodes;
 	unsigned long nodes;
-	unsigned long pages;
+	unsigned long cache;
 
 	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
 	local_irq_disable();
 	nodes = list_lru_shrink_count(&shadow_nodes, sc);
 	local_irq_enable();
 
-	if (sc->memcg) {
-		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-						     LRU_ALL_FILE);
-	} else {
-		pages = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
-			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
-	}
-
 	/*
-	 * Active cache pages are limited to 50% of memory, and shadow
-	 * entries that represent a refault distance bigger than that
-	 * do not have any effect. Limit the number of shadow nodes
-	 * such that shadow entries do not exceed the number of active
-	 * cache pages, assuming a worst-case node population density
-	 * of 1/8th on average.
+	 * Approximate a reasonable limit for the radix tree nodes
+	 * containing shadow entries. We don't need to keep more
+	 * shadow entries than possible pages on the active list,
+	 * since refault distances bigger than that are dismissed.
+	 *
+	 * The size of the active list converges toward 100% of
+	 * overall page cache as memory grows, with only a tiny
+	 * inactive list. Assume the total cache size for that.
+	 *
+	 * Nodes might be sparsely populated, with only one shadow
+	 * entry in the extreme case. Obviously, we cannot keep one
+	 * node for every eligible shadow entry, so compromise on a
+	 * worst-case density of 1/8th. Below that, not all eligible
+	 * refaults can be detected anymore.
 	 *
 	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
 	 * each, this will reclaim shadow entries when they consume
-	 * ~2% of available memory:
+	 * ~1.8% of available memory:
 	 *
-	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
+	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
 	 */
-	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);
+	if (sc->memcg) {
+		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+						     LRU_ALL_FILE);
+	} else {
+		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
+			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
+	}
+	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
 
 	if (nodes <= max_nodes)
 		return 0;
-
 	return nodes - max_nodes;
 }
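
The arithmetic in the new comment can be checked with a quick standalone calculation. The sketch below is an illustrative userspace program, not kernel code: the constants (64 slots per node, i.e. RADIX_TREE_MAP_SHIFT == 6, and 7 radix_tree_nodes per page) are the 64-bit figures quoted in the comment above, and the cache size is an arbitrary example. It reproduces both the max_nodes ceiling and the ~1.8% worst-case overhead.

#include <stdio.h>

/* Mirrors the 64-bit figures quoted in the patch comment:
 * 64 slots per radix tree node (RADIX_TREE_MAP_SHIFT == 6)
 * and roughly 7 radix_tree_nodes per page. */
#define RADIX_TREE_MAP_SHIFT	6
#define NODES_PER_PAGE		7

int main(void)
{
	/* Example only: a cache of 1M 4K pages, i.e. 4G of page cache. */
	unsigned long cache = 1UL << 20;

	/*
	 * A worst-case density of 1/8th means each node holds at least
	 * 64 / 8 = 8 shadow entries, so allowing one shadow entry per
	 * cache page caps the node count at cache / 8:
	 */
	unsigned long max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);

	/*
	 * Memory held by that many nodes, as a fraction of the cache:
	 * (cache / 8) entries spread over 7 nodes per page gives
	 * cache / 56 pages of nodes, i.e. 1/56 ~= 1.8% -- the figure
	 * in the updated comment.
	 */
	double overhead = (double)(max_nodes / NODES_PER_PAGE) / (double)cache;

	printf("max_nodes: %lu\n", max_nodes);
	printf("worst-case node overhead: %.1f%%\n", 100.0 * overhead);
	return 0;
}

Note that dropping the old "1 +" from the shift is what doubles the allowance: with RADIX_TREE_MAP_SHIFT == 6, the previous limit was pages >> 4 (one node per 16 file pages, matching an active list capped at half of memory), while the new one is cache >> 3, matching an active list that can grow to nearly the whole cache.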