author     Jaegeuk Kim <jaegeuk.kim@samsung.com>   2014-03-18 07:40:49 +0400
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>   2014-03-18 08:58:59 +0400
commit     87d6f890944d092c4ef5b84053f0d0d5d8137b0b (patch)
tree       b66d6ff07f4e9d838f43e3ddb0eae916507ce343 /fs/f2fs
parent     f8b2c1f940dca2843fe13b55ba5868bac8040551 (diff)
download   linux-87d6f890944d092c4ef5b84053f0d0d5d8137b0b.tar.xz
f2fs: avoid small data writes by skipping writepages
This patch introduces nr_pages_to_skip(sbi, type) to determine whether writepages can be skipped. The dentry, node, and meta pages can be controlled by F2FS without breaking the FS consistency.
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
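As a minimal illustration of the policy this patch introduces (not the kernel code itself), the standalone userspace sketch below models the per-type skip thresholds and the early-return check used in the writepages paths; the demo_ names and the blocks_per_seg/max_bio_blocks values are illustrative assumptions mirroring the helper added to fs/f2fs/segment.h in the diff.

/*
 * Userspace model of the skip policy: background writeback of a given
 * page type is deferred until enough dirty pages have accumulated to be
 * merged into one large bio.  All demo_ identifiers are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_page_type { DEMO_DATA, DEMO_NODE, DEMO_META };

struct demo_sb_info {
	int blocks_per_seg;	/* 512 blocks (2MB segment) by default */
	int max_bio_blocks;	/* device limit, stands in for MAX_BIO_BLOCKS() */
};

/* Mirrors the per-type thresholds chosen in fs/f2fs/segment.h below. */
static int demo_nr_pages_to_skip(const struct demo_sb_info *sbi,
				 enum demo_page_type type)
{
	switch (type) {
	case DEMO_DATA:
		return sbi->blocks_per_seg;	/* one segment of dentry pages */
	case DEMO_NODE:
		return 3 * sbi->blocks_per_seg;	/* three node types */
	case DEMO_META:
		return sbi->max_bio_blocks;
	default:
		return 0;
	}
}

/* Writepages-style check: skip small background writes, flush big batches. */
static bool demo_should_skip_writepages(const struct demo_sb_info *sbi,
					enum demo_page_type type,
					int dirty_pages)
{
	return dirty_pages < demo_nr_pages_to_skip(sbi, type);
}

int main(void)
{
	struct demo_sb_info sbi = { .blocks_per_seg = 512, .max_bio_blocks = 256 };

	/* 100 dirty node pages: below 3 * 512, keep collecting (prints 1). */
	printf("skip nodes: %d\n", demo_should_skip_writepages(&sbi, DEMO_NODE, 100));
	/* 2000 dirty node pages: above the threshold, write them out (prints 0). */
	printf("skip nodes: %d\n", demo_should_skip_writepages(&sbi, DEMO_NODE, 2000));
	return 0;
}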
Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/checkpoint.c |  4
-rw-r--r--  fs/f2fs/data.c       |  4
-rw-r--r--  fs/f2fs/node.c       |  8
-rw-r--r--  fs/f2fs/segment.h    | 19
4 files changed, 26 insertions, 9 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4c0e98ddf3db..1f52b70ff9d1 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -187,7 +187,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
- int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+ int nrpages = nr_pages_to_skip(sbi, META);
long written;
if (wbc->for_kupdate)
@@ -682,7 +682,7 @@ retry:
inode = igrab(entry->inode);
spin_unlock(&sbi->dir_inode_lock);
if (inode) {
- filemap_flush(inode->i_mapping);
+ filemap_fdatawrite(inode->i_mapping);
iput(inode);
} else {
/*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 101b4cd4170d..e3b7cfa17b99 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -868,6 +868,10 @@ static int f2fs_write_data_pages(struct address_space *mapping,
if (!mapping->a_ops->writepage)
return 0;
+ if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+ get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA))
+ return 0;
+
if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
desired_nrtw = MAX_DESIRED_PAGES_WP;
excess_nrtw = desired_nrtw - wbc->nr_to_write;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3e36240d81c1..cb514f1896ab 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1198,12 +1198,6 @@ redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
-/*
- * It is very important to gather dirty pages and write at once, so that we can
- * submit a big bio without interfering other data writes.
- * Be default, 512 pages (2MB) * 3 node types, is more reasonable.
- */
-#define COLLECT_DIRTY_NODES 1536
static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
@@ -1214,7 +1208,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
f2fs_balance_fs_bg(sbi);
/* collect a number of dirty node pages and write together */
- if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+ if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
return 0;
/* if mounting is failed, skip writing node pages */
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index c3d5e3689ffc..bbd976100d14 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -664,3 +664,22 @@ static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
struct request_queue *q = bdev_get_queue(bdev);
return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
}
+
+/*
+ * It is very important to gather dirty pages and write at once, so that we can
+ * submit a big bio without interfering other data writes.
+ * By default, 512 pages for directory data,
+ * 512 pages (2MB) * 3 for three types of nodes, and
+ * max_bio_blocks for meta are set.
+ */
+static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
+{
+ if (type == DATA)
+ return sbi->blocks_per_seg;
+ else if (type == NODE)
+ return 3 * sbi->blocks_per_seg;
+ else if (type == META)
+ return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+ else
+ return 0;
+}
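Assuming the default f2fs geometry of 4KB blocks and 2MB segments (blocks_per_seg = 512), these thresholds work out to 512 pages (2MB) for directory data, 1536 pages (6MB) across the three node types, and the device queue's maximum bio size for meta pages; background writeback below those counts is skipped so the dirty pages can later be submitted in a single large bio.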