author		Wu Fengguang <fengguang.wu@intel.com>	2011-04-21 22:06:32 +0400
committer	Wu Fengguang <fengguang.wu@intel.com>	2011-06-08 04:25:21 +0400
commit		e8dfc30582995ae12454cda517b17d6294175b07 (patch)
tree		357fe8482d6d125c5b96bb398b4a588cc2c1f16f /fs
parent		f758eeabeb96f878c860e8f110f94ec8820822a9 (diff)
download	linux-e8dfc30582995ae12454cda517b17d6294175b07.tar.xz
writeback: elevate queue_io() into wb_writeback()
Code refactor for more logical code layout. No behavior change.

- remove the mis-named __writeback_inodes_sb()

- wb_writeback()/writeback_inodes_wb() will decide when to queue_io()
  before calling __writeback_inodes_wb()

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
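To make the new division of labour concrete, here is a simplified C sketch of the wb_writeback() side after this patch. The identifiers (queue_io, writeback_sb_inodes, __writeback_inodes_wb, wb->b_io, wb->list_lock) come from the patch itself; the name wb_writeback_sketch() is a hypothetical stand-in for the real function, and the quota accounting, livelock avoidance, trace points and b_more_io wait are elided (see the full diff below).

/*
 * Condensed sketch: after this patch the caller takes wb->list_lock
 * around the whole writeback loop and refills b_io itself, so the
 * per-sb and per-wb helpers only consume what is already queued.
 */
static long wb_writeback_sketch(struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = { /* ... */ };
	long wrote = 0;

	spin_lock(&wb->list_lock);
	for (;;) {
		if (work->nr_pages <= 0)
			break;

		/* queue_io() is now done here, by the caller, once per pass */
		if (list_empty(&wb->b_io))
			queue_io(wb, wbc.older_than_this);

		/* ...and the helpers just walk the already-filled b_io list */
		if (work->sb)
			writeback_sb_inodes(work->sb, wb, &wbc, true);
		else
			__writeback_inodes_wb(wb, &wbc);

		/* quota accounting and the b_more_io wait elided */
	}
	spin_unlock(&wb->list_lock);

	return wrote;
}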
Diffstat (limited to 'fs')
-rw-r--r--	fs/fs-writeback.c	29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 36a30917e0dc..565b1fd15be6 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -580,17 +580,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
return 1;
}
-void writeback_inodes_wb(struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static void __writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
int ret = 0;
if (!wbc->wb_start)
wbc->wb_start = jiffies; /* livelock avoidance */
- spin_lock(&wb->list_lock);
-
- if (list_empty(&wb->b_io))
- queue_io(wb, wbc->older_than_this);
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
@@ -606,19 +602,16 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
if (ret)
break;
}
- spin_unlock(&wb->list_lock);
/* Leave any unwritten inodes on b_io */
}
-static void __writeback_inodes_sb(struct super_block *sb,
- struct bdi_writeback *wb, struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
- WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
spin_lock(&wb->list_lock);
if (list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
- writeback_sb_inodes(sb, wb, wbc, true);
+ __writeback_inodes_wb(wb, wbc);
spin_unlock(&wb->list_lock);
}
@@ -685,7 +678,7 @@ static long wb_writeback(struct bdi_writeback *wb,
* The intended call sequence for WB_SYNC_ALL writeback is:
*
* wb_writeback()
- *     __writeback_inodes_sb()  <== called only once
+ *     writeback_sb_inodes()    <== called only once
*         write_cache_pages()   <== called once for each inode
*             (quickly) tag currently dirty pages
*             (maybe slowly) sync all tagged pages
@@ -694,6 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb,
write_chunk = LONG_MAX;
wbc.wb_start = jiffies; /* livelock avoidance */
+ spin_lock(&wb->list_lock);
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
@@ -730,10 +724,12 @@ static long wb_writeback(struct bdi_writeback *wb,
wbc.inodes_written = 0;
trace_wbc_writeback_start(&wbc, wb->bdi);
+ if (list_empty(&wb->b_io))
+ queue_io(wb, wbc.older_than_this);
if (work->sb)
- __writeback_inodes_sb(work->sb, wb, &wbc);
+ writeback_sb_inodes(work->sb, wb, &wbc, true);
else
- writeback_inodes_wb(wb, &wbc);
+ __writeback_inodes_wb(wb, &wbc);
trace_wbc_writeback_written(&wbc, wb->bdi);
work->nr_pages -= write_chunk - wbc.nr_to_write;
@@ -761,7 +757,6 @@ static long wb_writeback(struct bdi_writeback *wb,
* become available for writeback. Otherwise
* we'll just busyloop.
*/
- spin_lock(&wb->list_lock);
if (!list_empty(&wb->b_more_io)) {
inode = wb_inode(wb->b_more_io.prev);
trace_wbc_writeback_wait(&wbc, wb->bdi);
@@ -769,8 +764,8 @@ static long wb_writeback(struct bdi_writeback *wb,
inode_wait_for_writeback(inode, wb);
spin_unlock(&inode->i_lock);
}
- spin_unlock(&wb->list_lock);
}
+ spin_unlock(&wb->list_lock);
return wrote;
}
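For reference, the two helpers as they read after the patch, condensed from the diff above; the livelock-avoidance check, the per-inode loop body and error handling are elided here:

static void __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct writeback_control *wbc)
{
	/* Only consumes what is already on b_io; no locking, no queueing. */
	while (!list_empty(&wb->b_io)) {
		/* ... writeback_sb_inodes() on the superblock of b_io.prev ... */
	}
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	/* The exported entry point keeps the locking and the queue_io() call. */
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	__writeback_inodes_wb(wb, wbc);
	spin_unlock(&wb->list_lock);
}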