author | Johannes Weiner <hannes@cmpxchg.org> | 2017-05-04 00:52:13 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-04 01:52:08 +0300 |
commit | 3db65812d68883d1acfd20a9f2bc2cd13a7cd5c5 (patch) | |
tree | 759aa3170445d5c5fce8c469098853c3a2d470fb /mm/vmscan.c | |
parent | c822f6223d03c2c5b026a21da09c6b6d523258cd (diff) | |
download | linux-3db65812d68883d1acfd20a9f2bc2cd13a7cd5c5.tar.xz |
Revert "mm, vmscan: account for skipped pages as a partial scan"
This reverts commit d7f05528eedb047efe2288cff777676b028747b6.
Now that reclaimability of a node is no longer based on the ratio
between pages scanned and theoretically reclaimable pages, we can remove
accounting tricks for pages skipped due to zone constraints.
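The effect of the revert on the scan accounting in isolate_lru_pages() is simple: every page examined in the isolation loop now counts toward `scan`, including pages skipped because they sit in a zone above sc->reclaim_idx, and the `skipped >> 2` partial-scan correction is dropped. The sketch below is only a userspace illustration of that arithmetic, assuming a toy LRU of six pages; the types and helper names are stand-ins, not kernel code.

```c
#include <stdio.h>

/* Toy stand-in for a page; 'eligible_zone' models page_zonenum() <= reclaim_idx. */
struct toy_page { int eligible_zone; };

/* Reverted scheme: skipped pages were folded in as a quarter of a scan,
 * or as a full scan if the LRU list ended up empty. */
static unsigned long old_nr_scanned(unsigned long scanned, unsigned long skipped,
				    int lru_now_empty)
{
	unsigned long total_skipped = lru_now_empty ? skipped : skipped >> 2;
	return scanned + total_skipped;
}

/* Scheme restored by this commit: every examined page is simply a scan. */
static unsigned long new_nr_scanned(unsigned long scanned_including_skipped)
{
	return scanned_including_skipped;
}

int main(void)
{
	struct toy_page lru[6] = { {1}, {0}, {0}, {1}, {0}, {1} };
	unsigned long scanned = 0, skipped = 0;

	for (int i = 0; i < 6; i++) {
		scanned++;	/* with the revert, skipped pages count as scanned too */
		if (!lru[i].eligible_zone)
			skipped++;
	}

	/* The old scheme only bumped 'scan' for pages that were not skipped. */
	printf("old accounting: %lu, new accounting: %lu\n",
	       old_nr_scanned(scanned - skipped, skipped, 0),
	       new_nr_scanned(scanned));
	return 0;
}
```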
Link: http://lkml.kernel.org/r/20170228214007.5621-9-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 22 |
1 file changed, 4 insertions, 18 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 02f2eb51b33e..77832f0dbe0d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1472,12 +1472,12 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	unsigned long nr_taken = 0;
 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
-	unsigned long skipped = 0, total_skipped = 0;
+	unsigned long skipped = 0;
 	unsigned long scan, nr_pages;
 	LIST_HEAD(pages_skipped);
 
 	for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
-					!list_empty(src);) {
+					!list_empty(src); scan++) {
 		struct page *page;
 
 		page = lru_to_page(src);
@@ -1491,12 +1491,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			continue;
 		}
 
-		/*
-		 * Account for scanned and skipped separetly to avoid the pgdat
-		 * being prematurely marked unreclaimable by pgdat_reclaimable.
-		 */
-		scan++;
-
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
 			nr_pages = hpage_nr_pages(page);
@@ -1525,6 +1519,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	if (!list_empty(&pages_skipped)) {
 		int zid;
 
+		list_splice(&pages_skipped, src);
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 			if (!nr_skipped[zid])
 				continue;
@@ -1532,17 +1527,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
 			skipped += nr_skipped[zid];
 		}
-
-		/*
-		 * Account skipped pages as a partial scan as the pgdat may be
-		 * close to unreclaimable. If the LRU list is empty, account
-		 * skipped pages as a full scan.
-		 */
-		total_skipped = list_empty(src) ? skipped : skipped >> 2;
-
-		list_splice(&pages_skipped, src);
 	}
-	*nr_scanned = scan + total_skipped;
+	*nr_scanned = scan;
 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
 				    skipped, nr_taken, mode, lru);
 	update_lru_sizes(lruvec, lru, nr_zone_taken);