author		Chris Mason <chris.mason@oracle.com>	2009-09-24 03:51:09 +0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-24 04:30:52 +0400
commit		42daec299b8b6b9605976d0ee1266b343a31cbcc (patch)
tree		6813d5d2b30c346413e329f0ad4d6f2262991311 /fs
parent		7ce618db9878689f87897b673fa3329070860fc7 (diff)
download	linux-42daec299b8b6b9605976d0ee1266b343a31cbcc.tar.xz
Btrfs: fix errors handling cached state in set/clear_extent_bit
Both set and clear_extent_bit allow passing a cached state struct to reduce rbtree search times. clear_extent_bit was improperly bypassing some of the checks around making sure the extent state fields were correct for a given operation. The fix used here (from Yan Zheng) is to use the hit_next goto target instead of jumping all the way down to start clearing bits without making sure the cached state was exactly correct for the operation we were doing.

This also fixes up the setting of the start variable for both ops in the case where we find an overlapping extent that begins before the range we want to change. In both cases we were incorrectly going backwards from the original requested change.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
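As a rough illustration of the fast path the message describes, here is a minimal user-space sketch. The struct and helper names are simplified stand-ins, not the real btrfs extent_io API; the point is only that a cached pointer must be re-validated (still linked into the tree, starting exactly at the requested offset) before the rbtree search is skipped.

	/*
	 * Toy model of the cached-state fast path.  Names are hypothetical;
	 * the "tree" is just an array scanned linearly instead of an rbtree.
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	typedef uint64_t u64;

	struct state {
		u64 start;
		u64 end;
		int in_tree;	/* stand-in for cached->tree != NULL */
	};

	/* Pretend rbtree lookup: a linear scan of a small array. */
	static struct state *tree_search(struct state *states, size_t n, u64 start)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (states[i].start <= start && start <= states[i].end)
				return &states[i];
		}
		return NULL;
	}

	/*
	 * Look up the state covering 'start', preferring the cached pointer.
	 * The cache is only trusted when it is still in the tree and begins
	 * exactly at 'start'; otherwise fall back to the ordinary search,
	 * much like the patch only takes the hit_next shortcut after those
	 * checks pass.
	 */
	static struct state *lookup(struct state *states, size_t n,
				    struct state *cached, u64 start)
	{
		if (cached && cached->in_tree && cached->start == start)
			return cached;		/* fast path, no tree walk */
		return tree_search(states, n, start);
	}

	int main(void)
	{
		struct state states[] = {
			{ .start = 0,  .end = 15, .in_tree = 1 },
			{ .start = 16, .end = 31, .in_tree = 1 },
		};
		struct state stale = { .start = 8, .end = 15, .in_tree = 0 };

		/* The stale cache is rejected and the search finds the real state. */
		struct state *s = lookup(states, 2, &stale, 16);

		printf("found [%llu, %llu]\n",
		       (unsigned long long)s->start, (unsigned long long)s->end);
		return 0;
	}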
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/extent_io.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7e16c6d8153f..b9506548853b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -495,11 +495,11 @@ again:
if (cached_state) {
cached = *cached_state;
*cached_state = NULL;
- if (cached->tree && cached->start == start) {
+ cached_state = NULL;
+ if (cached && cached->tree && cached->start == start) {
atomic_dec(&cached->refs);
state = cached;
- last_end = state->end;
- goto found;
+ goto hit_next;
}
free_extent_state(cached);
}
@@ -547,8 +547,6 @@ hit_next:
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- } else {
- start = state->start;
}
goto search_again;
}
@@ -566,16 +564,18 @@ hit_next:
if (wake)
wake_up(&state->wq);
+
set |= clear_state_bit(tree, prealloc, bits,
wake, delete);
prealloc = NULL;
goto out;
}
-found:
+
if (state->end < end && prealloc && !need_resched())
next_node = rb_next(&state->rb_node);
else
next_node = NULL;
+
set |= clear_state_bit(tree, state, bits, wake, delete);
if (last_end == (u64)-1)
goto out;
@@ -712,6 +712,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int err = 0;
u64 last_start;
u64 last_end;
+
again:
if (!prealloc && (mask & __GFP_WAIT)) {
prealloc = alloc_extent_state(mask);
@@ -756,6 +757,7 @@ hit_next:
err = -EEXIST;
goto out;
}
+
set_state_bits(tree, state, bits);
cache_state(state, cached_state);
merge_state(tree, state);
@@ -809,8 +811,6 @@ hit_next:
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- } else {
- start = state->start;
}
goto search_again;
}
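
To make the second half of the commit message concrete, here is a toy user-space loop, again with made-up names rather than the kernel code. It assumes the case the message describes: the matched state begins before the requested range, so resetting start to state->start walks backwards over already-requested territory, while advancing to last_end + 1 makes forward progress.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;

	int main(void)
	{
		/* A state covering [0, 99] overlaps a requested range [50, 80]. */
		u64 state_start = 0, state_end = 99;
		u64 start = 50, end = 80;

		/* Old behaviour: start = state->start lands *before* the request. */
		u64 old_next = state_start;

		/* Fixed behaviour: continue just past what was handled. */
		u64 last_end = state_end < end ? state_end : end;
		u64 new_next = last_end + 1;

		printf("requested start %llu, old next %llu (backwards), new next %llu\n",
		       (unsigned long long)start,
		       (unsigned long long)old_next,
		       (unsigned long long)new_next);
		return 0;
	}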