| author | Tejun Heo <tj@kernel.org> | 2015-05-23 01:23:22 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2015-06-02 17:38:12 +0300 |
| commit | dcc25ae76eb7b8ff883eaaab57e30e8f2f085be3 (patch) | |
| tree | 7fb1d01278ad2b16a1c21ead3e567a3bb7d00c25 /mm/page-writeback.c | |
| parent | 380c27ca33ebecc9da35aa90c8b3a9154f90aac2 (diff) | |
| download | linux-dcc25ae76eb7b8ff883eaaab57e30e8f2f085be3.tar.xz | |
writeback: move global_dirty_limit into wb_domain
This patch is a part of the series to define wb_domain which
represents a domain that wb's (bdi_writeback's) belong to and are
measured against each other in. This will enable IO backpressure
propagation for cgroup writeback.
global_dirty_limit exists to regulate the global dirty threshold, which
is a property of the wb_domain. This patch moves global_dirty_limit,
along with the dirty_lock and update_time that guard its updates, into
wb_domain (as dirty_limit, lock, and dirty_limit_tstamp respectively).
This is pure reorganization and doesn't introduce any behavioral
changes.
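
For orientation, the wb_domain fields this patch relies on can be sketched from the diff below. This is a partial, illustrative reconstruction; the authoritative definition lives in include/linux/writeback.h and carries further fields and documentation:

```c
/* Partial sketch of struct wb_domain, reconstructed from the fields this
 * diff touches; see include/linux/writeback.h for the full definition. */
struct wb_domain {
	spinlock_t lock;		  /* absorbs the old static dirty_lock */

	struct fprop_global completions;  /* proportional writeout fractions */
	struct timer_list period_timer;	  /* ages the writeout fractions */

	unsigned long dirty_limit_tstamp; /* absorbs the old static update_time */
	unsigned long dirty_limit;	  /* absorbs the old global_dirty_limit */
};
```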
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'mm/page-writeback.c')
| -rw-r--r-- | mm/page-writeback.c | 46 |
1 files changed, 23 insertions, 23 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 08e1737edb39..27e60ba8e688 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(laptop_mode);
 
 /* End of sysctl-exported parameters */
 
-unsigned long global_dirty_limit;
-
-static struct wb_domain global_wb_domain;
+struct wb_domain global_wb_domain;
 
 /*
  * Length of period for aging writeout fractions of bdis. This is an
@@ -470,9 +468,15 @@ static void writeout_period(unsigned long t)
 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 {
 	memset(dom, 0, sizeof(*dom));
+
+	spin_lock_init(&dom->lock);
+
 	init_timer_deferrable(&dom->period_timer);
 	dom->period_timer.function = writeout_period;
 	dom->period_timer.data = (unsigned long)dom;
+
+	dom->dirty_limit_tstamp = jiffies;
+
 	return fprop_global_init(&dom->completions, gfp);
 }
 
@@ -532,7 +536,9 @@ static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 
 static unsigned long hard_dirty_limit(unsigned long thresh)
 {
-	return max(thresh, global_dirty_limit);
+	struct wb_domain *dom = &global_wb_domain;
+
+	return max(thresh, dom->dirty_limit);
 }
 
 /**
@@ -916,17 +922,10 @@ out:
 	wb->avg_write_bandwidth = avg;
 }
 
-/*
- * The global dirtyable memory and dirty threshold could be suddenly knocked
- * down by a large amount (eg. on the startup of KVM in a swapless system).
- * This may throw the system into deep dirty exceeded state and throttle
- * heavy/light dirtiers alike. To retain good responsiveness, maintain
- * global_dirty_limit for tracking slowly down to the knocked down dirty
- * threshold.
- */
 static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 {
-	unsigned long limit = global_dirty_limit;
+	struct wb_domain *dom = &global_wb_domain;
+	unsigned long limit = dom->dirty_limit;
 
 	/*
 	 * Follow up in one step.
@@ -939,7 +938,7 @@ static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 	/*
 	 * Follow down slowly. Use the higher one as the target, because thresh
 	 * may drop below dirty. This is exactly the reason to introduce
-	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
+	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
 	 */
 	thresh = max(thresh, dirty);
 	if (limit > thresh) {
@@ -948,28 +947,27 @@ static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 	}
 	return;
 update:
-	global_dirty_limit = limit;
+	dom->dirty_limit = limit;
 }
 
 static void global_update_bandwidth(unsigned long thresh,
 				    unsigned long dirty,
 				    unsigned long now)
 {
-	static DEFINE_SPINLOCK(dirty_lock);
-	static unsigned long update_time = INITIAL_JIFFIES;
+	struct wb_domain *dom = &global_wb_domain;
 
 	/*
 	 * check locklessly first to optimize away locking for the most time
 	 */
-	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
+	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
 		return;
 
-	spin_lock(&dirty_lock);
-	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
+	spin_lock(&dom->lock);
+	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
 		update_dirty_limit(thresh, dirty);
-		update_time = now;
+		dom->dirty_limit_tstamp = now;
 	}
-	spin_unlock(&dirty_lock);
+	spin_unlock(&dom->lock);
 }
 
 /*
@@ -1761,10 +1759,12 @@ void laptop_sync_completion(void)
 
 void writeback_set_ratelimit(void)
 {
+	struct wb_domain *dom = &global_wb_domain;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
+
 	global_dirty_limits(&background_thresh, &dirty_thresh);
-	global_dirty_limit = dirty_thresh;
+	dom->dirty_limit = dirty_thresh;
 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
 	if (ratelimit_pages < 16)
 		ratelimit_pages = 16;
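
The global_update_bandwidth() hunk preserves a common ratelimiting idiom: test the timestamp locklessly first so the common case never touches the lock, then re-test under the lock before doing the work. Below is a minimal userspace sketch of that idiom, not kernel code; the names (maybe_update, UPDATE_INTERVAL_NS, dom_lock) are hypothetical, and the ~200ms interval is an assumption standing in for BANDWIDTH_INTERVAL:

```c
#include <pthread.h>
#include <stdint.h>
#include <time.h>

#define UPDATE_INTERVAL_NS (200L * 1000 * 1000)	/* assumed ~200ms interval */

static pthread_mutex_t dom_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_update_ns;	/* plays the role of dom->dirty_limit_tstamp */

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void maybe_update(void)
{
	int64_t now = now_ns();

	/* Lockless fast path: most callers bail out here without taking
	 * the lock. The racy read is tolerated, as in the kernel idiom;
	 * strict C11 code would use an atomic load here. */
	if (now < last_update_ns + UPDATE_INTERVAL_NS)
		return;

	pthread_mutex_lock(&dom_lock);
	/* Recheck under the lock: another thread may have refreshed the
	 * timestamp between our lockless test and lock acquisition. */
	if (now >= last_update_ns + UPDATE_INTERVAL_NS) {
		/* ... do the rate-limited work here ... */
		last_update_ns = now;
	}
	pthread_mutex_unlock(&dom_lock);
}
```

The design point mirrors the kernel code exactly: the cost of the spinlock is paid only once per interval, while the frequent callers in the write path see a single comparison and return.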