author     Mingming Cao <cmm@us.ibm.com>            2006-03-26 13:37:55 +0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-03-26 20:57:00 +0400
commit     89747d369d34e333b9b60f10f333a0b727b4e4e2 (patch)
tree       26b55849e666c7f6c7a7312b100756c3b591559f /fs/ext3
parent     e2d53f9525790dfacbcf09f359536311d3913d98 (diff)
download   linux-89747d369d34e333b9b60f10f333a0b727b4e4e2.tar.xz
[PATCH] ext3_get_blocks: Mapping multiple blocks at once
Currently ext3_get_block() only maps or allocates one block at a time.  This is quite inefficient for sequential IO workloads.

I have previously posted an early implementation of a simple multiple-block map and allocation scheme for current ext3.  The basic idea is to allocate the first block in the existing way, then attempt to map or allocate the next adjacent blocks on a best-effort basis.  More description of the implementation can be found here:
http://marc.theaimsgroup.com/?l=ext2-devel&m=112162230003522&w=2

The following is the latest version of that work: the original patch has been broken into 5 patches, some logic has been reworked, and some bugs have been fixed.  The break-up is:

[patch 1] Add mapping of multiple blocks at a time in ext3_get_blocks()
[patch 2] Extend ext3_get_blocks() to support multiple block allocation
[patch 3] Implement multiple block allocation in ext3-try-to-allocate (called via ext3_new_block())
[patch 4] Proper accounting updates in ext3_new_blocks()
[patch 5] Adjust the reservation window size properly (by the given number of blocks to allocate) before block allocation, to increase the chance of allocating multiple blocks in a single call

Tests done so far include fsx, tiobench and dbench.  The following numbers, collected from Direct IO tests (1G file creation/read), show that system time has been greatly reduced (by more than 50% on my 8-CPU system) with the patches.

1G file DIO write:
        2.6.15          2.6.15+patches
real    0m31.275s       0m31.161s
user    0m0.000s        0m0.000s
sys     0m3.384s        0m0.564s

1G file DIO read:
        2.6.15          2.6.15+patches
real    0m30.733s       0m30.624s
user    0m0.000s        0m0.004s
sys     0m0.748s        0m0.380s

Some earlier tests we did on buffered IO, using multiple block allocation together with delayed allocation, showed a noticeable improvement in throughput and system time.

This patch:

Add support for mapping multiple blocks in one call.  This is useful for DIO reads and re-writes (where blocks are already allocated), and is also in line with Christoph's proposal of using getblocks() in mpage_readpage() or mpage_readpages().

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
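To make the best-effort idea above concrete, here is a small user-space sketch (not kernel code, and not part of this patch): starting from the first mapped block, it counts how many of the following logical blocks sit physically adjacent on disk, capped by the caller's maxblocks.  The name count_contiguous() and the plain uint32_t array standing in for the on-disk block pointers are illustrative assumptions only.

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch only: blkptrs[] stands in for the on-disk block pointers that
 * ext3_get_branch() would walk; blkptrs[i] is the physical block backing
 * logical block (first + i), or 0 if that block is unmapped.
 */
static unsigned long count_contiguous(const uint32_t *blkptrs, size_t nptrs,
                                      unsigned long maxblocks)
{
        unsigned long count;

        if (nptrs == 0 || blkptrs[0] == 0)
                return 0;               /* plain lookup failed */

        count = 1;                      /* first block mapped the existing way */
        while (count < maxblocks && count < nptrs &&
               blkptrs[count] == blkptrs[0] + count)
                count++;                /* next block is adjacent on disk */

        return count;                   /* number of blocks mapped in one call */
}

A caller asking for maxblocks == 1 gets exactly the old single-block behaviour, which is why ext3_get_block() can simply become a wrapper that passes 1, as the diff below does.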
Diffstat (limited to 'fs/ext3')
-rw-r--r--  fs/ext3/dir.c     5
-rw-r--r--  fs/ext3/inode.c 105
2 files changed, 79 insertions, 31 deletions
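Before the diff itself, a hedged sketch of the calling convention the new interface gives get_blocks()-style users such as the direct-IO path changed below: a positive return value is a block count, which the caller converts into a byte size in bh_result->b_size and then folds into a 0 (success) return.  The demo_* struct and function names here are stand-ins for illustration, not kernel API.

#include <stdio.h>

struct demo_bh {
        unsigned long b_size;           /* bytes mapped by this request */
};

/* stand-in for ext3_get_blocks_handle(): pretend 3 blocks were mapped */
static int demo_map_blocks(unsigned long maxblocks)
{
        return maxblocks < 3 ? (int)maxblocks : 3;
}

static int demo_dio_get_blocks(unsigned long maxblocks, unsigned int blkbits,
                               struct demo_bh *bh)
{
        int ret = demo_map_blocks(maxblocks);

        if (ret > 0) {
                bh->b_size = (unsigned long)ret << blkbits;  /* blocks -> bytes */
                ret = 0;                                     /* success */
        }
        return ret;     /* 0 = ok (b_size says how much), < 0 = error */
}

int main(void)
{
        struct demo_bh bh = { 0 };

        if (demo_dio_get_blocks(16, 12, &bh) == 0)      /* 4K blocks assumed */
                printf("mapped %lu bytes in one call\n", bh.b_size);
        return 0;
}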
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 773459164bb2..38bd3f6ec147 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
- err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
- if (!err) {
+ err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+ &map_bh, 0, 0);
+ if (err > 0) {
page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra,
filp,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 76e22c9c9c6c..fcfb10f77120 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -330,7 +330,7 @@ static int ext3_block_to_path(struct inode *inode,
ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
}
if (boundary)
- *boundary = (i_block & (ptrs - 1)) == (final - 1);
+ *boundary = final - 1 - (i_block & (ptrs - 1));
return n;
}
@@ -669,11 +669,15 @@ err_out:
* akpm: `handle' can be NULL if create == 0.
*
* The BKL may not be held on entry here. Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
*/
int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int extend_disksize)
+ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
+ unsigned long maxblocks, struct buffer_head *bh_result,
+ int create, int extend_disksize)
{
int err = -EIO;
int offsets[4];
@@ -681,11 +685,15 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
Indirect *partial;
unsigned long goal;
int left;
- int boundary = 0;
- const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+ int blocks_to_boundary = 0;
+ int depth;
struct ext3_inode_info *ei = EXT3_I(inode);
+ int count = 0;
+ unsigned long first_block = 0;
+
J_ASSERT(handle != NULL || create == 0);
+ depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
if (depth == 0)
goto out;
@@ -694,8 +702,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
/* Simplest case - block found, no allocation needed */
if (!partial) {
+ first_block = chain[depth - 1].key;
clear_buffer_new(bh_result);
- goto got_it;
+ count++;
+ /*map more blocks*/
+ while (count < maxblocks && count <= blocks_to_boundary) {
+ if (!verify_chain(chain, chain + depth - 1)) {
+ /*
+ * Indirect block might be removed by
+ * truncate while we were reading it.
+ * Handling of that case: forget what we've
+ * got now. Flag the err as EAGAIN, so it
+ * will reread.
+ */
+ err = -EAGAIN;
+ count = 0;
+ break;
+ }
+ if (le32_to_cpu(*(chain[depth-1].p + count)) ==
+     (first_block + count))
+ count++;
+ else
+ break;
+ }
+ if (err != -EAGAIN)
+ goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */
@@ -723,6 +754,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
}
partial = ext3_get_branch(inode, depth, offsets, chain, &err);
if (!partial) {
+ count++;
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
@@ -772,8 +804,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
set_buffer_new(bh_result);
got_it:
map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
- if (boundary)
+ if (blocks_to_boundary == 0)
set_buffer_boundary(bh_result);
+ err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */
cleanup:
@@ -787,21 +820,6 @@ out:
return err;
}
-static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- handle_t *handle = NULL;
- int ret;
-
- if (create) {
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 1);
- return ret;
-}
-
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
static int
@@ -812,9 +830,12 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
handle_t *handle = journal_current_handle();
int ret = 0;
- if (!handle)
+ if (!create)
goto get_block; /* A read */
+ if (max_blocks == 1)
+ goto get_block; /* A single block get */
+
if (handle->h_transaction->t_state == T_LOCKED) {
/*
* Huge direct-io writes can hold off commits for long
@@ -841,13 +862,31 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
}
get_block:
- if (ret == 0)
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
+ if (ret == 0) {
+ ret = ext3_get_blocks_handle(handle, inode, iblock,
+ max_blocks, bh_result, create, 0);
+ if (ret > 0) {
+ bh_result->b_size = (ret << inode->i_blkbits);
+ ret = 0;
+ }
+ }
return ret;
}
+static int ext3_get_blocks(struct inode *inode, sector_t iblock,
+ unsigned long maxblocks, struct buffer_head *bh_result,
+ int create)
+{
+ return ext3_direct_io_get_blocks(inode, iblock, maxblocks,
+ bh_result, create);
+}
+
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ return ext3_get_blocks(inode, iblock, 1, bh_result, create);
+}
+
/*
* `handle' can be NULL if create is zero
*/
@@ -862,8 +901,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
dummy.b_state = 0;
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
+ err = ext3_get_blocks_handle(handle, inode, block, 1,
+ &dummy, create, 1);
+ if (err == 1) {
+ err = 0;
+ } else if (err >= 0) {
+ WARN_ON(1);
+ err = -EIO;
+ }
+ *errp = err;
+ if (!err && buffer_mapped(&dummy)) {
struct buffer_head *bh;
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (!bh) {