Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  115
1 file changed, 62 insertions, 53 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56ddf41149eb..39fdfb14eeaa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,8 +60,8 @@ struct scan_control {
int may_writepage;
- /* Can pages be swapped as part of reclaim? */
- int may_swap;
+ /* Can mapped pages be reclaimed? */
+ int may_unmap;
/* This context's SWAP_CLUSTER_MAX. If freeing memory for
* suspend, we effectively ignore SWAP_CLUSTER_MAX.
@@ -78,6 +78,12 @@ struct scan_control {
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
+ /*
+ * Nodemask of nodes allowed by the caller. If NULL, all nodes
+ * are scanned.
+ */
+ nodemask_t *nodemask;
+
/* Pluggable isolate pages callback */
unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
unsigned long *scanned, int order, int mode,
@@ -214,8 +220,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0) {
- printk(KERN_ERR "%s: nr=%ld\n",
- __func__, shrinker->nr);
+ printk(KERN_ERR "shrink_slab: %pF negative objects to "
+ "delete nr=%ld\n",
+ shrinker->shrink, shrinker->nr);
shrinker->nr = max_pass;
}
@@ -276,7 +283,7 @@ static inline int page_mapping_inuse(struct page *page)
static inline int is_page_cache_freeable(struct page *page)
{
- return page_count(page) - !!PagePrivate(page) == 2;
+ return page_count(page) - !!page_has_private(page) == 2;
}
static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -360,7 +367,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* Some data journaling orphaned pages can have
* page->mapping == NULL while being dirty with clean buffers.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (try_to_free_buffers(page)) {
ClearPageDirty(page);
printk("%s: orphaned page\n", __func__);
@@ -606,7 +613,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (unlikely(!page_evictable(page, NULL)))
goto cull_mlocked;
- if (!sc->may_swap && page_mapped(page))
+ if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
/* Double the slab pressure for mapped and swapcache pages */
@@ -720,7 +727,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* process address space (page_count == 1) it can be freed.
* Otherwise, leave the page on the LRU so it is swappable.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (!try_to_release_page(page, sc->gfp_mask))
goto activate_locked;
if (!mapping && page_count(page) == 1) {
@@ -1298,17 +1305,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
}
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
pgdeactivate += pgmoved;
- if (buffer_heads_over_limit) {
- spin_unlock_irq(&zone->lru_lock);
- pagevec_strip(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
spin_unlock_irq(&zone->lru_lock);
- if (vm_swap_full())
- pagevec_swap_free(&pvec);
-
+ if (buffer_heads_over_limit)
+ pagevec_strip(&pvec);
pagevec_release(&pvec);
}
@@ -1543,7 +1544,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
struct zone *zone;
sc->all_unreclaimable = 1;
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+ sc->nodemask) {
if (!populated_zone(zone))
continue;
/*
@@ -1688,17 +1690,18 @@ out:
}
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, nodemask_t *nodemask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
- .may_swap = 1,
+ .may_unmap = 1,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
.isolate_pages = isolate_pages_global,
+ .nodemask = nodemask,
};
return do_try_to_free_pages(zonelist, &sc);
@@ -1713,17 +1716,18 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
{
struct scan_control sc = {
.may_writepage = !laptop_mode,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
+ .nodemask = NULL, /* we don't care about the placement */
};
struct zonelist *zonelist;
if (noswap)
- sc.may_swap = 0;
+ sc.may_unmap = 0;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1762,7 +1766,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
.order = order,
@@ -1963,7 +1967,9 @@ static int kswapd(void *p)
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
- node_to_cpumask_ptr(cpumask, pgdat->node_id);
+ const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+ lockdep_set_current_reclaim_state(GFP_KERNEL);
if (!cpumask_empty(cpumask))
set_cpus_allowed_ptr(tsk, cpumask);
@@ -2048,22 +2054,19 @@ unsigned long global_lru_pages(void)
#ifdef CONFIG_PM
/*
* Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
*
* For pass > 3 we also try to shrink the LRU lists that contain a few pages
*/
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long ret = 0;
+ unsigned long nr_reclaimed = 0;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
enum lru_list l;
- if (!populated_zone(zone))
- continue;
if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
continue;
@@ -2082,14 +2085,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
zone->lru[l].nr_scan = 0;
nr_to_scan = min(nr_pages, lru_pages);
- ret += shrink_list(l, nr_to_scan, zone,
+ nr_reclaimed += shrink_list(l, nr_to_scan, zone,
sc, prio);
- if (ret >= nr_pages)
- return ret;
+ if (nr_reclaimed >= nr_pages) {
+ sc->nr_reclaimed = nr_reclaimed;
+ return;
+ }
}
}
}
- return ret;
+ sc->nr_reclaimed = nr_reclaimed;
}
/*
@@ -2103,13 +2108,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
unsigned long shrink_all_memory(unsigned long nr_pages)
{
unsigned long lru_pages, nr_slab;
- unsigned long ret = 0;
int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 0,
- .swap_cluster_max = nr_pages,
+ .may_unmap = 0,
.may_writepage = 1,
.isolate_pages = isolate_pages_global,
};
@@ -2125,8 +2128,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
if (!reclaim_state.reclaimed_slab)
break;
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
nr_slab -= reclaim_state.reclaimed_slab;
@@ -2145,21 +2148,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
/* Force reclaiming mapped pages in the passes #3 and #4 */
if (pass > 2)
- sc.may_swap = 1;
+ sc.may_unmap = 1;
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - ret;
+ unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
sc.nr_scanned = 0;
- ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (ret >= nr_pages)
+ sc.swap_cluster_max = nr_to_scan;
+ shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
reclaim_state.reclaimed_slab = 0;
shrink_slab(sc.nr_scanned, sc.gfp_mask,
global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2168,21 +2172,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
}
/*
- * If ret = 0, we could not shrink LRUs, but there may be something
- * in slab caches
+ * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+ * something in slab caches
*/
- if (!ret) {
+ if (!sc.nr_reclaimed) {
do {
reclaim_state.reclaimed_slab = 0;
shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ } while (sc.nr_reclaimed < nr_pages &&
+ reclaim_state.reclaimed_slab > 0);
}
+
out:
current->reclaim_state = NULL;
- return ret;
+ return sc.nr_reclaimed;
}
#endif
@@ -2198,7 +2204,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
for_each_node_state(nid, N_HIGH_MEMORY) {
pg_data_t *pgdat = NODE_DATA(nid);
- node_to_cpumask_ptr(mask, pgdat->node_id);
+ const struct cpumask *mask;
+
+ mask = cpumask_of_node(pgdat->node_id);
if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
/* One of our CPUs online: restore mask */
@@ -2288,11 +2296,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
- .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+ .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
+ .order = order,
.isolate_pages = isolate_pages_global,
};
unsigned long slab_reclaimable;