| field | value | date |
|---|---|---|
| author | Wu Fengguang <fengguang.wu@intel.com> | 2010-09-12 23:34:05 +0400 |
| committer | Wu Fengguang <fengguang.wu@intel.com> | 2011-07-10 09:09:02 +0400 |
| commit | 7762741e3af69720186802e945229b6a5afd5c49 (patch) | |
| tree | e5ca904b7b31154b1a412bcd3a2160f31581bdb7 /mm/page-writeback.c | |
| parent | 00821b002df7da867bb2c15b4f83f3706371383f (diff) | |
| download | linux-7762741e3af69720186802e945229b6a5afd5c49.tar.xz | |
writeback: consolidate variable names in balance_dirty_pages()
Introduce
nr_dirty = NR_FILE_DIRTY + NR_WRITEBACK + NR_UNSTABLE_NFS
in order to simplify many tests in the following patches.
balance_dirty_pages() will eventually care only about the dirty sums
besides nr_writeback.
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
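
To make the effect of the consolidation concrete outside the kernel tree, here is a minimal standalone sketch. It is not kernel code: the counter and threshold values are made-up numbers standing in for global_page_state(NR_FILE_DIRTY / NR_UNSTABLE_NFS / NR_WRITEBACK), global_dirty_limits() and the per-bdi statistics, and the sketch only illustrates how the single nr_dirty / bdi_dirty sums replace the repeated nr_reclaimable + nr_writeback expressions in the comparisons.

```c
/*
 * Standalone userspace model (NOT kernel code) of the consolidation made
 * by this patch.  All counters and thresholds below are hypothetical
 * page counts chosen only for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* hypothetical global counters (pages) */
	unsigned long nr_file_dirty   = 1200;
	unsigned long nr_unstable_nfs =  300;
	unsigned long nr_writeback    =  900;

	/* hypothetical global and per-bdi limits (pages) */
	unsigned long background_thresh = 1500;
	unsigned long dirty_thresh      = 3000;
	unsigned long bdi_thresh        = 1000;

	/* hypothetical per-bdi counters (pages) */
	unsigned long bdi_nr_reclaimable = 600;
	unsigned long bdi_nr_writeback   = 250;

	/* Before the patch, tests combined two variables over and over. */
	unsigned long nr_reclaimable = nr_file_dirty + nr_unstable_nfs;

	/* After the patch, one consolidated sum feeds every comparison. */
	unsigned long nr_dirty  = nr_reclaimable + nr_writeback;
	unsigned long bdi_dirty = bdi_nr_reclaimable + bdi_nr_writeback;

	/* The "comfortably below the limits" check becomes one compare. */
	if (nr_dirty <= (background_thresh + dirty_thresh) / 2) {
		printf("nr_dirty=%lu: below the midpoint, no throttling\n",
		       nr_dirty);
		return 0;
	}

	/* The throttle condition also shrinks to two simple comparisons. */
	bool dirty_exceeded = (bdi_dirty > bdi_thresh) ||
			      (nr_dirty > dirty_thresh);

	printf("nr_dirty=%lu bdi_dirty=%lu dirty_exceeded=%d\n",
	       nr_dirty, bdi_dirty, dirty_exceeded);
	return 0;
}
```

Compiled with any C99 compiler, the sketch prints which of the two checks the consolidated sums would trip; in the kernel the same comparisons sit inside the for (;;) throttling loop shown in the diff below.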
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 21 |
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 446bdf7b975b..5f3e1b46ace5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -565,8 +565,9 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long write_chunk)
 {
-	long nr_reclaimable, bdi_nr_reclaimable;
-	long nr_writeback, bdi_nr_writeback;
+	unsigned long nr_reclaimable, bdi_nr_reclaimable;
+	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
+	unsigned long bdi_dirty;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
@@ -579,7 +580,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
-		nr_writeback = global_page_state(NR_WRITEBACK);
+		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 
 		global_dirty_limits(&background_thresh, &dirty_thresh);
 
@@ -588,8 +589,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <=
-				(background_thresh + dirty_thresh) / 2)
+		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 			break;
 
 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -607,10 +607,12 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 */
 		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
 			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 		} else {
 			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat(bdi, BDI_WRITEBACK);
 		}
 
 		/*
@@ -619,9 +621,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * bdi or process from holding back light ones; The latter is
 		 * the last resort safeguard.
 		 */
-		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-			|| (nr_reclaimable + nr_writeback > dirty_thresh);
+		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+				 (nr_dirty > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;