author    Ingo Molnar <mingo@elte.hu>  2011-06-16 15:23:15 +0400
committer Ingo Molnar <mingo@elte.hu>  2011-06-16 15:23:22 +0400
commit    b4f9f2b64aa189c5584f266f4f0343af7a705441 (patch)
tree      f410718bb93590ff61682b566c10f70d5883bbcd /fs
parent    76369139ceb955deefc509e6e12ce9d6ce50ccab (diff)
parent    2c53b436a30867eb6b47dd7bab23ba638d1fb0d2 (diff)
download  linux-b4f9f2b64aa189c5584f266f4f0343af7a705441.tar.xz
Merge commit 'v3.0-rc3' into perf/core
Merge reason: add the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs')
-rw-r--r--  fs/block_dev.c  4
-rw-r--r--  fs/btrfs/btrfs_inode.h  3
-rw-r--r--  fs/btrfs/ctree.c  32
-rw-r--r--  fs/btrfs/ctree.h  22
-rw-r--r--  fs/btrfs/delayed-inode.c  8
-rw-r--r--  fs/btrfs/disk-io.c  41
-rw-r--r--  fs/btrfs/extent-tree.c  158
-rw-r--r--  fs/btrfs/extent_io.c  2
-rw-r--r--  fs/btrfs/extent_io.h  2
-rw-r--r--  fs/btrfs/file.c  10
-rw-r--r--  fs/btrfs/free-space-cache.c  231
-rw-r--r--  fs/btrfs/inode-map.c  34
-rw-r--r--  fs/btrfs/inode.c  265
-rw-r--r--  fs/btrfs/ioctl.c  49
-rw-r--r--  fs/btrfs/relocation.c  34
-rw-r--r--  fs/btrfs/scrub.c  192
-rw-r--r--  fs/btrfs/super.c  10
-rw-r--r--  fs/btrfs/transaction.c  307
-rw-r--r--  fs/btrfs/transaction.h  29
-rw-r--r--  fs/btrfs/volumes.c  10
-rw-r--r--  fs/btrfs/xattr.c  2
-rw-r--r--  fs/ceph/addr.c  2
-rw-r--r--  fs/ceph/caps.c  10
-rw-r--r--  fs/ceph/dir.c  11
-rw-r--r--  fs/ceph/export.c  4
-rw-r--r--  fs/ceph/file.c  35
-rw-r--r--  fs/ceph/inode.c  18
-rw-r--r--  fs/ceph/ioctl.c  6
-rw-r--r--  fs/ceph/locks.c  29
-rw-r--r--  fs/ceph/snap.c  2
-rw-r--r--  fs/ceph/xattr.c  6
-rw-r--r--  fs/cifs/Kconfig  3
-rw-r--r--  fs/cifs/cifsencrypt.c  2
-rw-r--r--  fs/cifs/connect.c  6
-rw-r--r--  fs/dcookies.c  3
-rw-r--r--  fs/exec.c  5
-rw-r--r--  fs/fat/file.c  2
-rw-r--r--  fs/fuse/inode.c  2
-rw-r--r--  fs/gfs2/glock.c  9
-rw-r--r--  fs/jfs/jfs_logmgr.c  2
-rw-r--r--  fs/namei.c  11
-rw-r--r--  fs/nilfs2/btree.c  39
-rw-r--r--  fs/nilfs2/segment.c  2
-rw-r--r--  fs/ocfs2/super.c  2
-rw-r--r--  fs/partitions/check.c  10
-rw-r--r--  fs/super.c  2
-rw-r--r--  fs/ubifs/io.c  2
-rw-r--r--  fs/ubifs/journal.c  1
-rw-r--r--  fs/ubifs/orphan.c  2
-rw-r--r--  fs/ubifs/recovery.c  164
-rw-r--r--  fs/ubifs/replay.c  3
-rw-r--r--  fs/ubifs/shrinker.c  6
-rw-r--r--  fs/ubifs/super.c  42
-rw-r--r--  fs/ubifs/tnc.c  9
-rw-r--r--  fs/ubifs/ubifs.h  4
55 files changed, 1133 insertions, 768 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1f2b19978333..1a2421f908f0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
* individual writeable reference is too fragile given the
* way @mode is used in blkdev_get/put().
*/
- if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
- !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+ if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
+ (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
bdev->bd_write_holder = true;
disk_block_events(disk);
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 93b1aa932014..52d7eca8c7bf 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -121,9 +121,6 @@ struct btrfs_inode {
*/
u64 index_cnt;
- /* the start of block group preferred for allocations. */
- u64 block_group;
-
/* the fsync log has some corner cases that mean we have to check
* directories to see if any unlinks have been done before
* the directory was logged. See tree-log.c for all the
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b0e18d986e0a..2e667868e0d2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -43,8 +43,6 @@ struct btrfs_path *btrfs_alloc_path(void)
{
struct btrfs_path *path;
path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
- if (path)
- path->reada = 1;
return path;
}
@@ -1224,11 +1222,13 @@ static void reada_for_search(struct btrfs_root *root,
u64 search;
u64 target;
u64 nread = 0;
+ u64 gen;
int direction = path->reada;
struct extent_buffer *eb;
u32 nr;
u32 blocksize;
u32 nscan = 0;
+ bool map = true;
if (level != 1)
return;
@@ -1250,7 +1250,19 @@ static void reada_for_search(struct btrfs_root *root,
nritems = btrfs_header_nritems(node);
nr = slot;
+ if (node->map_token || path->skip_locking)
+ map = false;
+
while (1) {
+ if (map && !node->map_token) {
+ unsigned long offset = btrfs_node_key_ptr_offset(nr);
+ map_private_extent_buffer(node, offset,
+ sizeof(struct btrfs_key_ptr),
+ &node->map_token,
+ &node->kaddr,
+ &node->map_start,
+ &node->map_len, KM_USER1);
+ }
if (direction < 0) {
if (nr == 0)
break;
@@ -1268,14 +1280,23 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
- readahead_tree_block(root, search, blocksize,
- btrfs_node_ptr_generation(node, nr));
+ gen = btrfs_node_ptr_generation(node, nr);
+ if (map && node->map_token) {
+ unmap_extent_buffer(node, node->map_token,
+ KM_USER1);
+ node->map_token = NULL;
+ }
+ readahead_tree_block(root, search, blocksize, gen);
nread += blocksize;
}
nscan++;
if ((nread > 65536 || nscan > 32))
break;
}
+ if (map && node->map_token) {
+ unmap_extent_buffer(node, node->map_token, KM_USER1);
+ node->map_token = NULL;
+ }
}
/*
@@ -1648,9 +1669,6 @@ again:
}
cow_done:
BUG_ON(!cow && ins_len);
- if (level != btrfs_header_level(b))
- WARN_ON(1);
- level = btrfs_header_level(b);
p->nodes[level] = b;
if (!p->skip_locking)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c093fa98f61..378b5b4443f3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -930,7 +930,6 @@ struct btrfs_fs_info {
* is required instead of the faster short fsync log commits
*/
u64 last_trans_log_full_commit;
- u64 open_ioctl_trans;
unsigned long mount_opt:20;
unsigned long compress_type:4;
u64 max_inline;
@@ -947,7 +946,6 @@ struct btrfs_fs_info {
struct super_block *sb;
struct inode *btree_inode;
struct backing_dev_info bdi;
- struct mutex trans_mutex;
struct mutex tree_log_mutex;
struct mutex transaction_kthread_mutex;
struct mutex cleaner_mutex;
@@ -968,6 +966,7 @@ struct btrfs_fs_info {
struct rw_semaphore subvol_sem;
struct srcu_struct subvol_srcu;
+ spinlock_t trans_lock;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
@@ -980,6 +979,7 @@ struct btrfs_fs_info {
atomic_t async_submit_draining;
atomic_t nr_async_bios;
atomic_t async_delalloc_pages;
+ atomic_t open_ioctl_trans;
/*
* this is used by the balancing code to wait for all the pending
@@ -1044,6 +1044,7 @@ struct btrfs_fs_info {
int closing;
int log_root_recovering;
int enospc_unlink;
+ int trans_no_join;
u64 total_pinned;
@@ -1065,7 +1066,6 @@ struct btrfs_fs_info {
struct reloc_control *reloc_ctl;
spinlock_t delalloc_lock;
- spinlock_t new_trans_lock;
u64 delalloc_bytes;
/* data_alloc_cluster is only used in ssd mode */
@@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
+#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2238,6 +2239,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
void btrfs_block_rsv_release(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
+int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv);
int btrfs_set_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2350,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent);
+static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
+{
+ /*
+ * Get synced with close_ctree()
+ */
+ smp_mb();
+ return fs_info->closing;
+}
+
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
struct btrfs_path *path,
@@ -2512,8 +2525,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root,
- u64 new_dirid, u64 alloc_hint);
+ struct btrfs_root *new_root, u64 new_dirid);
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags);
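
The new btrfs_fs_closing() helper above replaces the scattered "smp_mb(); if (fs_info->closing)" sequences elsewhere in this merge with a single accessor that pairs the barrier with the read. Below is a minimal userspace sketch of that publish/observe pattern using C11 atomics; the fs_state, fs_closing, and fs_begin_close names are illustrative stand-ins, not kernel APIs, and the acquire load plays the role of smp_mb().

/* Userspace analogue of the btrfs_fs_closing() pattern (hypothetical
 * names; not kernel code). */
#include <stdatomic.h>
#include <stdio.h>

struct fs_state {
	atomic_int closing;	/* 0 = running, nonzero = closing */
};

/* Reader: the acquire load orders this check against the reader's
 * later accesses, as smp_mb() does in the kernel helper. */
static int fs_closing(struct fs_state *fs)
{
	return atomic_load_explicit(&fs->closing, memory_order_acquire);
}

/* Writer: the close_ctree() analogue publishes the flag with release
 * semantics so prior stores are visible to anyone who sees it set. */
static void fs_begin_close(struct fs_state *fs)
{
	atomic_store_explicit(&fs->closing, 1, memory_order_release);
}

int main(void)
{
	struct fs_state fs = { .closing = 0 };
	printf("closing? %d\n", fs_closing(&fs));
	fs_begin_close(&fs);
	printf("closing? %d\n", fs_closing(&fs));
	return 0;
}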
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 01e29503a54b..6462c29d2d37 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&head);
next = item;
+ nitems = 0;
/*
* count the number of the continuous items that we can insert in batch
@@ -1129,7 +1130,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
delayed_node = async_node->delayed_node;
root = delayed_node->root;
- trans = btrfs_join_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto free_path;
@@ -1572,8 +1573,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_transid(inode_item, trans->transid);
btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
- btrfs_set_stack_inode_block_group(inode_item,
- BTRFS_I(inode)->block_group);
+ btrfs_set_stack_inode_block_group(inode_item, 0);
btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
inode->i_atime.tv_sec);
@@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_delayed_node *delayed_node;
- int ret;
+ int ret = 0;
delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 98b6a71decba..9f68c6898653 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1505,24 +1505,24 @@ static int transaction_kthread(void *arg)
vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
mutex_lock(&root->fs_info->transaction_kthread_mutex);
- spin_lock(&root->fs_info->new_trans_lock);
+ spin_lock(&root->fs_info->trans_lock);
cur = root->fs_info->running_transaction;
if (!cur) {
- spin_unlock(&root->fs_info->new_trans_lock);
+ spin_unlock(&root->fs_info->trans_lock);
goto sleep;
}
now = get_seconds();
if (!cur->blocked &&
(now < cur->start_time || now - cur->start_time < 30)) {
- spin_unlock(&root->fs_info->new_trans_lock);
+ spin_unlock(&root->fs_info->trans_lock);
delay = HZ * 5;
goto sleep;
}
transid = cur->transid;
- spin_unlock(&root->fs_info->new_trans_lock);
+ spin_unlock(&root->fs_info->trans_lock);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
if (transid == trans->transid) {
ret = btrfs_commit_transaction(trans, root);
@@ -1613,7 +1613,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->ordered_operations);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
spin_lock_init(&fs_info->delalloc_lock);
- spin_lock_init(&fs_info->new_trans_lock);
+ spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
@@ -1645,6 +1645,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->max_inline = 8192 * 1024;
fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT;
+ fs_info->trans_no_join = 0;
fs_info->thread_pool_size = min_t(unsigned long,
num_online_cpus() + 2, 8);
@@ -1667,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->scrub_pause_wait);
init_rwsem(&fs_info->scrub_super_lock);
fs_info->scrub_workers_refcnt = 0;
- btrfs_init_workers(&fs_info->scrub_workers, "scrub",
- fs_info->thread_pool_size, &fs_info->generic_worker);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
@@ -1709,7 +1708,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->do_barriers = 1;
- mutex_init(&fs_info->trans_mutex);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
mutex_init(&fs_info->chunk_mutex);
@@ -2479,13 +2477,13 @@ int btrfs_commit_super(struct btrfs_root *root)
down_write(&root->fs_info->cleanup_work_sem);
up_write(&root->fs_info->cleanup_work_sem);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
/* run commit again to drop the original snapshot */
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_commit_transaction(trans, root);
@@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
INIT_LIST_HEAD(&splice);
- list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
spin_lock(&root->fs_info->delalloc_lock);
+ list_splice_init(&root->fs_info->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
@@ -3024,10 +3021,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
WARN_ON(1);
- mutex_lock(&root->fs_info->trans_mutex);
mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ spin_lock(&root->fs_info->trans_lock);
list_splice_init(&root->fs_info->trans_list, &list);
+ root->fs_info->trans_no_join = 1;
+ spin_unlock(&root->fs_info->trans_lock);
+
while (!list_empty(&list)) {
t = list_entry(list.next, struct btrfs_transaction, list);
if (!t)
@@ -3052,23 +3052,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
t->blocked = 0;
if (waitqueue_active(&root->fs_info->transaction_wait))
wake_up(&root->fs_info->transaction_wait);
- mutex_unlock(&root->fs_info->trans_mutex);
- mutex_lock(&root->fs_info->trans_mutex);
t->commit_done = 1;
if (waitqueue_active(&t->commit_wait))
wake_up(&t->commit_wait);
- mutex_unlock(&root->fs_info->trans_mutex);
-
- mutex_lock(&root->fs_info->trans_mutex);
btrfs_destroy_pending_snapshots(t);
btrfs_destroy_delalloc_inodes(root);
- spin_lock(&root->fs_info->new_trans_lock);
+ spin_lock(&root->fs_info->trans_lock);
root->fs_info->running_transaction = NULL;
- spin_unlock(&root->fs_info->new_trans_lock);
+ spin_unlock(&root->fs_info->trans_lock);
btrfs_destroy_marked_extents(root, &t->dirty_pages,
EXTENT_DIRTY);
@@ -3082,8 +3077,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
kmem_cache_free(btrfs_transaction_cachep, t);
}
+ spin_lock(&root->fs_info->trans_lock);
+ root->fs_info->trans_no_join = 0;
+ spin_unlock(&root->fs_info->trans_lock);
mutex_unlock(&root->fs_info->transaction_kthread_mutex);
- mutex_unlock(&root->fs_info->trans_mutex);
return 0;
}
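
The disk-io.c hunks above are part of replacing the heavyweight trans_mutex with the short-lived trans_lock spinlock plus a trans_no_join flag, which keeps new writers from attaching to a transaction while the cleanup path tears it down. A minimal pthread sketch of that scheme follows; the struct layout and function names are illustrative, not the real btrfs structures.

/* Sketch of the trans_lock/trans_no_join idea, assuming hypothetical
 * fields; compile with -lpthread. */
#include <pthread.h>
#include <stdio.h>

struct fs_info {
	pthread_spinlock_t trans_lock;	/* replaces the old trans_mutex */
	int trans_no_join;		/* set while transactions are destroyed */
	int running_transaction;	/* stand-in for the transaction pointer */
};

/* Joiners check the flag under the spinlock and back off during cleanup. */
static int join_transaction(struct fs_info *fs)
{
	int ok;
	pthread_spin_lock(&fs->trans_lock);
	ok = !fs->trans_no_join && fs->running_transaction;
	pthread_spin_unlock(&fs->trans_lock);
	return ok;
}

/* Cleanup: forbid new joiners, detach the running transaction, do the
 * slow teardown outside the lock, then allow joins again. */
static void cleanup_transaction(struct fs_info *fs)
{
	pthread_spin_lock(&fs->trans_lock);
	fs->trans_no_join = 1;
	fs->running_transaction = 0;
	pthread_spin_unlock(&fs->trans_lock);

	/* ... destroy pending snapshots, delalloc inodes, etc. ... */

	pthread_spin_lock(&fs->trans_lock);
	fs->trans_no_join = 0;
	pthread_spin_unlock(&fs->trans_lock);
}

int main(void)
{
	struct fs_info fs = { .trans_no_join = 0, .running_transaction = 1 };
	pthread_spin_init(&fs.trans_lock, PTHREAD_PROCESS_PRIVATE);
	printf("join before cleanup: %d\n", join_transaction(&fs));
	cleanup_transaction(&fs);
	printf("join after cleanup: %d\n", join_transaction(&fs));
	pthread_spin_destroy(&fs.trans_lock);
	return 0;
}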
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 169bd62ce776..b42efc2ded51 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -348,7 +348,7 @@ static int caching_kthread(void *data)
*/
path->skip_locking = 1;
path->search_commit_root = 1;
- path->reada = 2;
+ path->reada = 1;
key.objectid = last;
key.offset = 0;
@@ -366,8 +366,7 @@ again:
nritems = btrfs_header_nritems(leaf);
while (1) {
- smp_mb();
- if (fs_info->closing > 1) {
+ if (btrfs_fs_closing(fs_info) > 1) {
last = (u64)-1;
break;
}
@@ -379,15 +378,18 @@ again:
if (ret)
break;
- caching_ctl->progress = last;
- btrfs_release_path(path);
- up_read(&fs_info->extent_commit_sem);
- mutex_unlock(&caching_ctl->mutex);
- if (btrfs_transaction_in_commit(fs_info))
- schedule_timeout(1);
- else
+ if (need_resched() ||
+ btrfs_next_leaf(extent_root, path)) {
+ caching_ctl->progress = last;
+ btrfs_release_path(path);
+ up_read(&fs_info->extent_commit_sem);
+ mutex_unlock(&caching_ctl->mutex);
cond_resched();
- goto again;
+ goto again;
+ }
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+ continue;
}
if (key.objectid < block_group->key.objectid) {
@@ -3065,7 +3067,7 @@ again:
spin_unlock(&data_sinfo->lock);
alloc:
alloc_target = btrfs_get_alloc_profile(root, 1);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -3087,13 +3089,21 @@ alloc:
}
goto again;
}
+
+ /*
+ * If we have less pinned bytes than we want to allocate then
+ * don't bother committing the transaction, it won't help us.
+ */
+ if (data_sinfo->bytes_pinned < bytes)
+ committed = 1;
spin_unlock(&data_sinfo->lock);
/* commit the current transaction and try again */
commit_trans:
- if (!committed && !root->fs_info->open_ioctl_trans) {
+ if (!committed &&
+ !atomic_read(&root->fs_info->open_ioctl_trans)) {
committed = 1;
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
@@ -3472,7 +3482,7 @@ again:
goto out;
ret = -ENOSPC;
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto out;
ret = btrfs_commit_transaction(trans, root);
@@ -3699,7 +3709,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
if (trans)
return -EAGAIN;
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
ret = btrfs_commit_transaction(trans, root);
return 0;
@@ -3837,6 +3847,37 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}
+int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv)
+{
+ struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
+ u64 num_bytes;
+ int ret;
+
+ /*
+ * Truncate should be freeing data, but give us 2 items just in case it
+ * needs to use some space. We may want to be smarter about this in the
+ * future.
+ */
+ num_bytes = btrfs_calc_trans_metadata_size(root, 2);
+
+ /* We already have enough bytes, just return */
+ if (rsv->reserved >= num_bytes)
+ return 0;
+
+ num_bytes -= rsv->reserved;
+
+ /*
+ * You should have reserved enough space beforehand to do this, so this
+ * should not fail.
+ */
+ ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
+ BUG_ON(ret);
+
+ return 0;
+}
+
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int num_items)
@@ -3877,23 +3918,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
/*
- * one for deleting orphan item, one for updating inode and
- * two for calling btrfs_truncate_inode_items.
- *
- * btrfs_truncate_inode_items is a delete operation, it frees
- * more space than it uses in most cases. So two units of
- * metadata space should be enough for calling it many times.
- * If all of the metadata space is used, we can commit
- * transaction and use space it freed.
+ * We need to hold space in order to delete our orphan item once we've
+ * added it, so this takes the reservation so we can release it later
+ * when we are truly done with the orphan item.
*/
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
@@ -4987,6 +5023,15 @@ have_block_group:
if (unlikely(block_group->ro))
goto loop;
+ spin_lock(&block_group->free_space_ctl->tree_lock);
+ if (cached &&
+ block_group->free_space_ctl->free_space <
+ num_bytes + empty_size) {
+ spin_unlock(&block_group->free_space_ctl->tree_lock);
+ goto loop;
+ }
+ spin_unlock(&block_group->free_space_ctl->tree_lock);
+
/*
* Ok we want to try and use the cluster allocator, so lets look
* there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
@@ -5150,6 +5195,7 @@ checks:
btrfs_add_free_space(block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
+ btrfs_put_block_group(block_group);
break;
loop:
failed_cluster_refill = false;
@@ -5172,9 +5218,7 @@ loop:
* LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
* again
*/
- if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
- (found_uncached_bg || empty_size || empty_cluster ||
- allowed_chunk_alloc)) {
+ if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
index = 0;
if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
found_uncached_bg = false;
@@ -5214,42 +5258,39 @@ loop:
goto search;
}
- if (loop < LOOP_CACHING_WAIT) {
- loop++;
- goto search;
- }
+ loop++;
if (loop == LOOP_ALLOC_CHUNK) {
- empty_size = 0;
- empty_cluster = 0;
- }
+ if (allowed_chunk_alloc) {
+ ret = do_chunk_alloc(trans, root, num_bytes +
+ 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_LIMITED);
+ allowed_chunk_alloc = 0;
+ if (ret == 1)
+ done_chunk_alloc = 1;
+ } else if (!done_chunk_alloc &&
+ space_info->force_alloc ==
+ CHUNK_ALLOC_NO_FORCE) {
+ space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+ }
- if (allowed_chunk_alloc) {
- ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data,
- CHUNK_ALLOC_LIMITED);
- allowed_chunk_alloc = 0;
- done_chunk_alloc = 1;
- } else if (!done_chunk_alloc &&
- space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
- space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+ /*
+ * We didn't allocate a chunk, go ahead and drop the
+ * empty size and loop again.
+ */
+ if (!done_chunk_alloc)
+ loop = LOOP_NO_EMPTY_SIZE;
}
- if (loop < LOOP_NO_EMPTY_SIZE) {
- loop++;
- goto search;
+ if (loop == LOOP_NO_EMPTY_SIZE) {
+ empty_size = 0;
+ empty_cluster = 0;
}
- ret = -ENOSPC;
+
+ goto search;
} else if (!ins->objectid) {
ret = -ENOSPC;
- }
-
- /* we found what we needed */
- if (ins->objectid) {
- if (!(data & BTRFS_BLOCK_GROUP_DATA))
- trans->block_group = block_group->key.objectid;
-
- btrfs_put_block_group(block_group);
+ } else if (ins->objectid) {
ret = 0;
}
@@ -6526,7 +6567,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
BUG_ON(cache->ro);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
alloc_flags = update_block_group_flags(root, cache->flags);
@@ -6882,6 +6923,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->reada = 1;
cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
if (cache_gen != 0 &&
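
One allocator change above is worth spelling out: before descending into a block group, find_free_extent now peeks at free_space_ctl->free_space under tree_lock and skips fully cached groups that cannot possibly satisfy num_bytes + empty_size. The standalone sketch below illustrates that early-reject test; the struct is a stand-in, not the kernel's btrfs_block_group_cache.

/* Early-reject sketch for the block-group free-space check
 * (illustrative types, not btrfs code). */
#include <stdbool.h>
#include <stdio.h>

struct block_group {
	unsigned long long free_space;	/* bytes tracked by the free-space ctl */
	bool cached;			/* free-space accounting is complete */
};

/* Skip a fully cached group whose free space cannot cover the request;
 * an uncached group may still gain space as caching progresses. */
static bool worth_searching(const struct block_group *bg,
			    unsigned long long num_bytes,
			    unsigned long long empty_size)
{
	if (bg->cached && bg->free_space < num_bytes + empty_size)
		return false;
	return true;
}

int main(void)
{
	struct block_group bg = { .free_space = 4096, .cached = true };
	printf("search for 8k? %d\n", worth_searching(&bg, 8192, 0));
	printf("search for 2k? %d\n", worth_searching(&bg, 2048, 0));
	return 0;
}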
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c5d9fbb92bc3..7055d11c1efd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
if (total_bytes >= max_bytes)
break;
if (!found) {
- *start = state->start;
+ *start = max(cur_start, state->start);
found = 1;
}
last = state->end;
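
The one-line count_range_bits() fix above matters when the first matching extent state begins before the queried range: previously *start reported state->start, which could lie below the caller's cur_start. A tiny worked example of the clamp, in plain C with an illustrative max helper:

/* Worked illustration of the count_range_bits() start clamp. */
#include <stdio.h>

static unsigned long long max_u64(unsigned long long a, unsigned long long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long long cur_start = 4096;	/* query begins here */
	unsigned long long state_start = 0;	/* matching state spans 0..8191 */

	/* Old code reported 0; the clamped version reports 4096. */
	printf("reported start = %llu\n", max_u64(cur_start, state_start));
	return 0;
}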
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a4757c..a11a92ee2d30 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
unsigned long map_len;
struct page *first_page;
unsigned long bflags;
- atomic_t refs;
struct list_head leak_list;
struct rcu_head rcu_head;
+ atomic_t refs;
/* the spinlock is used to protect most operations */
spinlock_t lock;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c6a22d783c35..fa4ef18b66b1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
if (!btrfs_test_opt(root, AUTO_DEFRAG))
return 0;
- if (root->fs_info->closing)
+ if (btrfs_fs_closing(root->fs_info))
return 0;
if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
if (!defrag)
return -ENOMEM;
- defrag->ino = inode->i_ino;
+ defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
first_ino = defrag->ino + 1;
rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
- if (fs_info->closing)
+ if (btrfs_fs_closing(fs_info))
goto next_free;
spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,14 +1480,12 @@ int btrfs_sync_file(struct file *file, int datasync)
* the current transaction, we can bail out now without any
* syncing
*/
- mutex_lock(&root->fs_info->trans_mutex);
+ smp_mb();
if (BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) {
BTRFS_I(inode)->last_trans = 0;
- mutex_unlock(&root->fs_info->trans_mutex);
goto out;
}
- mutex_unlock(&root->fs_info->trans_mutex);
/*
* ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758..9f985a429877 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
return inode;
spin_lock(&block_group->lock);
- if (!root->fs_info->closing) {
+ if (!btrfs_fs_closing(root->fs_info)) {
block_group->inode = igrab(inode);
block_group->iref = 1;
}
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
pgoff_t index = 0;
unsigned long first_page_offset;
int num_checksums;
- int ret = 0, ret2;
+ int ret = 0;
INIT_LIST_HEAD(&bitmaps);
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
- BUG_ON(ret);
+ if (ret) {
+ printk(KERN_ERR "Duplicate entries in "
+ "free space cache, dumping\n");
+ kunmap(page);
+ unlock_page(page);
+ page_cache_release(page);
+ goto free_cache;
+ }
} else {
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!e->bitmap) {
@@ -414,10 +421,18 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
goto free_cache;
}
spin_lock(&ctl->tree_lock);
- ret2 = link_free_space(ctl, e);
+ ret = link_free_space(ctl, e);
ctl->total_bitmaps++;
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
+ if (ret) {
+ printk(KERN_ERR "Duplicate entries in "
+ "free space cache, dumping\n");
+ kunmap(page);
+ unlock_page(page);
+ page_cache_release(page);
+ goto free_cache;
+ }
list_add_tail(&e->list, &bitmaps);
}
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
* If we're unmounting then just return, since this does a search on the
* normal root and not the commit root and we could deadlock.
*/
- smp_mb();
- if (fs_info->closing)
+ if (btrfs_fs_closing(fs_info))
return 0;
/*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
+
+ /* Since the first page has all of our checksums and our generation we
+ * need to calculate the offset into the page that we can start writing
+ * our entries.
+ */
+ first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
+
filemap_write_and_wait(inode->i_mapping);
btrfs_wait_ordered_range(inode, inode->i_size &
~(root->sectorsize - 1), (u64)-1);
+ /* make sure we don't overflow that first page */
+ if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
+ /* this is really the same as running out of space, where we also return 0 */
+ printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
+ ret = 0;
+ goto out_update;
+ }
+
/* We need a checksum per page. */
crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
if (!crc)
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
return -1;
}
- /* Since the first page has all of our checksums and our generation we
- * need to calculate the offset into the page that we can start writing
- * our entries.
- */
- first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
-
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list))
cluster = list_entry(block_group->cluster_list.next,
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
ret = 1;
out_free:
+ kfree(checksums);
+ kfree(pages);
+
+out_update:
if (ret != 1) {
invalidate_inode_pages2_range(inode->i_mapping, 0, index);
BTRFS_I(inode)->generation = 0;
}
- kfree(checksums);
- kfree(pages);
btrfs_update_inode(trans, root, inode);
return ret;
}
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
* logically.
*/
if (bitmap) {
- WARN_ON(info->bitmap);
+ if (info->bitmap) {
+ WARN_ON_ONCE(1);
+ return -EEXIST;
+ }
p = &(*p)->rb_right;
} else {
- WARN_ON(!info->bitmap);
+ if (!info->bitmap) {
+ WARN_ON_ONCE(1);
+ return -EEXIST;
+ }
p = &(*p)->rb_left;
}
}
@@ -1386,6 +1417,23 @@ again:
return 0;
}
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
+{
+ u64 bytes_to_set = 0;
+ u64 end;
+
+ end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+ bytes_to_set = min(end - offset, bytes);
+
+ bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+ return bytes_to_set;
+
+}
+
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
@@ -1422,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
return true;
}
+static struct btrfs_free_space_op free_space_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_free_space *bitmap_info;
+ struct btrfs_block_group_cache *block_group = NULL;
int added = 0;
- u64 bytes, offset, end;
+ u64 bytes, offset, bytes_added;
int ret;
bytes = info->bytes;
@@ -1436,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
if (!ctl->op->use_bitmap(ctl, info))
return 0;
+ if (ctl->op == &free_space_op)
+ block_group = ctl->private;
again:
+ /*
+ * Since we link bitmaps right into the cluster we need to see if we
+ * have a cluster here, and if so and it has our bitmap we need to add
+ * the free space to that bitmap.
+ */
+ if (block_group && !list_empty(&block_group->cluster_list)) {
+ struct btrfs_free_cluster *cluster;
+ struct rb_node *node;
+ struct btrfs_free_space *entry;
+
+ cluster = list_entry(block_group->cluster_list.next,
+ struct btrfs_free_cluster,
+ block_group_list);
+ spin_lock(&cluster->lock);
+ node = rb_first(&cluster->root);
+ if (!node) {
+ spin_unlock(&cluster->lock);
+ goto no_cluster_bitmap;
+ }
+
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ if (!entry->bitmap) {
+ spin_unlock(&cluster->lock);
+ goto no_cluster_bitmap;
+ }
+
+ if (entry->offset == offset_to_bitmap(ctl, offset)) {
+ bytes_added = add_bytes_to_bitmap(ctl, entry,
+ offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ }
+ spin_unlock(&cluster->lock);
+ if (!bytes) {
+ ret = 1;
+ goto out;
+ }
+ }
+
+no_cluster_bitmap:
bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!bitmap_info) {
@@ -1444,19 +1540,10 @@ again:
goto new_bitmap;
}
- end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
- if (offset >= bitmap_info->offset && offset + bytes > end) {
- bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
- bytes -= end - offset;
- offset = end;
- added = 0;
- } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
- bitmap_set_bits(ctl, bitmap_info, offset, bytes);
- bytes = 0;
- } else {
- BUG();
- }
+ bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ added = 0;
if (!bytes) {
ret = 1;
@@ -1735,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
"\n", count);
}
-static struct btrfs_free_space_op free_space_op = {
- .recalc_thresholds = recalculate_thresholds,
- .use_bitmap = use_bitmap,
-};
-
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2111,9 +2193,11 @@ again:
/*
* This searches the block group for just extents to fill the cluster with.
*/
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ struct list_head *bitmaps, u64 offset, u64 bytes,
+ u64 min_bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *first = NULL;
@@ -2135,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* extent entry.
*/
while (entry->bitmap) {
+ if (list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
node = rb_next(&entry->offset_index);
if (!node)
return -ENOSPC;
@@ -2154,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
return -ENOSPC;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- if (entry->bitmap)
+ if (entry->bitmap) {
+ if (list_empty(&entry->list))
+ list_add_tail(&entry->list, bitmaps);
continue;
+ }
+
/*
* we haven't filled the empty size and the window is
* very large. reset and try again
@@ -2207,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
* This specifically looks for bitmaps that may work in the cluster, we assume
* that we have already failed to find extents that will work.
*/
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ struct list_head *bitmaps, u64 offset, u64 bytes,
+ u64 min_bytes)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
@@ -2219,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
if (ctl->total_bitmaps == 0)
return -ENOSPC;
+ /*
+ * First check our cached list of bitmaps and see if there is an entry
+ * here that will work.
+ */
+ list_for_each_entry(entry, bitmaps, list) {
+ if (entry->bytes < min_bytes)
+ continue;
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+ bytes, min_bytes);
+ if (!ret)
+ return 0;
+ }
+
+ /*
+ * If we do have entries on our list and we are here then we didn't find
+ * anything, so go ahead and get the next entry after the last entry in
+ * this list and start the search from there.
+ */
+ if (!list_empty(bitmaps)) {
+ entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+ list);
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ goto search;
+ }
+
entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
if (!entry)
return -ENOSPC;
+search:
node = &entry->offset_index;
do {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2253,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct list_head bitmaps;
+ struct btrfs_free_space *entry, *tmp;
u64 min_bytes;
int ret;
@@ -2291,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
goto out;
}
- ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
- min_bytes);
+ INIT_LIST_HEAD(&bitmaps);
+ ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+ bytes, min_bytes);
if (ret)
- ret = setup_cluster_bitmap(block_group, cluster, offset,
- bytes, min_bytes);
+ ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+ offset, bytes, min_bytes);
+
+ /* Clear our temporary list */
+ list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+ list_del_init(&entry->list);
if (!ret) {
atomic_inc(&block_group->count);
@@ -2481,7 +2609,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
return inode;
spin_lock(&root->cache_lock);
- if (!root->fs_info->closing)
+ if (!btrfs_fs_closing(root->fs_info))
root->cache_inode = igrab(inode);
spin_unlock(&root->cache_lock);
@@ -2504,12 +2632,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return 0;
+
/*
* If we're unmounting then just return, since this does a search on the
* normal root and not the commit root and we could deadlock.
*/
- smp_mb();
- if (fs_info->closing)
+ if (btrfs_fs_closing(fs_info))
return 0;
path = btrfs_alloc_path();
@@ -2543,6 +2673,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct inode *inode;
int ret;
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return 0;
+
inode = lookup_free_ino_inode(root, path);
if (IS_ERR(inode))
return 0;
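
The new add_bytes_to_bitmap() helper above collapses the old open-coded boundary cases in insert_into_bitmap(): each bitmap covers BITS_PER_BITMAP * ctl->unit bytes, so the helper sets min(end - offset, bytes) and returns how much it consumed, letting the caller advance offset and loop across bitmap boundaries. A self-contained sketch follows; the granularity constants are assumed for illustration (the real values depend on ctl->unit).

/* Standalone sketch of the add_bytes_to_bitmap() clamping logic
 * (assumed constants, not btrfs's actual configuration). */
#include <stdio.h>

#define UNIT		4096ULL		/* bytes represented by one bit */
#define BITS_PER_BITMAP	32768ULL	/* bits in one bitmap page */

/* Returns how many of 'bytes' fit in the bitmap that starts at
 * 'bitmap_offset'; the caller loops with the remainder. */
static unsigned long long add_bytes_to_bitmap(unsigned long long bitmap_offset,
					      unsigned long long offset,
					      unsigned long long bytes)
{
	unsigned long long end = bitmap_offset + BITS_PER_BITMAP * UNIT;
	unsigned long long room = end - offset;

	return bytes < room ? bytes : room;	/* min(end - offset, bytes) */
}

int main(void)
{
	unsigned long long bitmap = 0;
	unsigned long long offset = 128ULL * 1024 * 1024 - 4096;
	unsigned long long bytes = 3 * 4096;
	unsigned long long set = add_bytes_to_bitmap(bitmap, offset, bytes);

	/* Only one unit fits in this bitmap; the rest spills to the next. */
	printf("set %llu of %llu bytes\n", set, bytes);
	return 0;
}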
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 3262cd17a12f..b4087e0fa871 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,6 +38,9 @@ static int caching_kthread(void *data)
int slot;
int ret;
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return 0;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -59,8 +62,7 @@ again:
goto out;
while (1) {
- smp_mb();
- if (fs_info->closing)
+ if (btrfs_fs_closing(fs_info))
goto out;
leaf = path->nodes[0];
@@ -141,6 +143,9 @@ static void start_caching(struct btrfs_root *root)
int ret;
u64 objectid;
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return;
+
spin_lock(&root->cache_lock);
if (root->cached != BTRFS_CACHE_NO) {
spin_unlock(&root->cache_lock);
@@ -178,6 +183,9 @@ static void start_caching(struct btrfs_root *root)
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return btrfs_find_free_objectid(root, objectid);
+
again:
*objectid = btrfs_find_ino_for_alloc(root);
@@ -201,6 +209,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return;
+
again:
if (root->cached == BTRFS_CACHE_FINISHED) {
__btrfs_add_free_space(ctl, objectid, 1);
@@ -250,6 +262,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
struct rb_node *n;
u64 count;
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return;
+
while (1) {
n = rb_first(rbroot);
if (!n)
@@ -388,9 +403,24 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
int prealloc;
bool retry = false;
+ /* only fs tree and subvol/snap needs ino cache */
+ if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
+ (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
+ return 0;
+
+ /* Don't save inode cache if we are deleting this root */
+ if (btrfs_root_refs(&root->root_item) == 0 &&
+ root != root->fs_info->tree_root)
+ return 0;
+
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return 0;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+
again:
inode = lookup_free_ino_inode(root, path);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
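
The inode-map.c hunks above gate every entry point of the free-ino cache behind the new INODE_MAP_CACHE mount option, falling back to plain btrfs_find_free_objectid() when the option is off. A hypothetical sketch of that gate-with-fallback shape (the names below are illustrative, not the btrfs functions themselves):

/* Sketch of feature-flag gating with a plain fallback path. */
#include <stdbool.h>
#include <stdio.h>

static bool inode_map_cache_enabled;		/* mount-option stand-in */
static unsigned long long next_objectid = 256;

/* Fallback: hand out the next free objectid sequentially. */
static int find_free_objectid(unsigned long long *objectid)
{
	*objectid = next_objectid++;
	return 0;
}

/* Mirrors btrfs_find_free_ino(): defer to the fallback when the cache
 * is disabled; a real cache lookup would go where the comment sits. */
static int find_free_ino(unsigned long long *objectid)
{
	if (!inode_map_cache_enabled)
		return find_free_objectid(objectid);
	/* ... consult the free-ino cache for a reusable id here ... */
	return find_free_objectid(objectid);
}

int main(void)
{
	unsigned long long ino;

	find_free_ino(&ino);
	printf("allocated ino %llu\n", ino);
	return 0;
}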
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 39a9d5750efd..751ddf8fc58a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -138,7 +138,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->leave_spinning = 1;
- btrfs_set_trans_block_group(trans, inode);
key.objectid = btrfs_ino(inode);
key.offset = start;
@@ -426,9 +425,8 @@ again:
}
}
if (start == 0) {
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
/* lets try to make an inline extent */
@@ -623,8 +621,9 @@ retry:
async_extent->start + async_extent->ram_size - 1,
GFP_NOFS);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
+ trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
@@ -793,9 +792,8 @@ static noinline int cow_file_range(struct inode *inode,
int ret = 0;
BUG_ON(is_free_space_inode(root, inode));
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -1077,10 +1075,12 @@ static noinline int run_delalloc_nocow(struct inode *inode,
nolock = is_free_space_inode(root, inode);
if (nolock)
- trans = btrfs_join_transaction_nolock(root, 1);
+ trans = btrfs_join_transaction_nolock(root);
else
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
+
BUG_ON(IS_ERR(trans));
+ trans->block_rsv = &root->fs_info->delalloc_block_rsv;
cow_start = (u64)-1;
cur_offset = start;
@@ -1519,8 +1519,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
{
struct btrfs_ordered_sum *sum;
- btrfs_set_trans_block_group(trans, inode);
-
list_for_each_entry(sum, list, list) {
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
@@ -1735,11 +1733,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (!ret) {
if (nolock)
- trans = btrfs_join_transaction_nolock(root, 1);
+ trans = btrfs_join_transaction_nolock(root);
else
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
@@ -1752,11 +1749,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
0, &cached_state, GFP_NOFS);
if (nolock)
- trans = btrfs_join_transaction_nolock(root, 1);
+ trans = btrfs_join_transaction_nolock(root);
else
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1990,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
- return 0;
+ goto good;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2431,7 +2427,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
(u64)-1);
if (root->orphan_block_rsv || root->orphan_item_inserted) {
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (!IS_ERR(trans))
btrfs_end_transaction(trans, root);
}
@@ -2511,12 +2507,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key location;
int maybe_acls;
- u64 alloc_group_block;
u32 rdev;
int ret;
path = btrfs_alloc_path();
BUG_ON(!path);
+ path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -2526,6 +2522,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
+ if (!leaf->map_token)
+ map_private_extent_buffer(leaf, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item),
+ &leaf->map_token, &leaf->kaddr,
+ &leaf->map_start, &leaf->map_len,
+ KM_USER1);
inode->i_mode = btrfs_inode_mode(leaf, inode_item);
inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
@@ -2555,8 +2557,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->index_cnt = (u64)-1;
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
- alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
-
/*
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
@@ -2566,8 +2566,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
if (!maybe_acls)
cache_no_acl(inode);
- BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
- alloc_group_block, 0);
+ if (leaf->map_token) {
+ unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+ leaf->map_token = NULL;
+ }
+
btrfs_free_path(path);
inode_item = NULL;
@@ -2647,7 +2650,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
- btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+ btrfs_set_inode_block_group(leaf, item, 0);
if (leaf->map_token) {
unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
@@ -3004,8 +3007,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
-
btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
@@ -3094,8 +3095,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
-
if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
@@ -3514,7 +3513,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
err = PTR_ERR(trans);
break;
}
- btrfs_set_trans_block_group(trans, inode);
err = btrfs_drop_extents(trans, inode, cur_offset,
cur_offset + hole_size,
@@ -3648,9 +3646,8 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_i_size_write(inode, 0);
while (1) {
- trans = btrfs_start_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_block_rsv_check(trans, root,
@@ -4133,7 +4130,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->reada = 2;
+
+ path->reada = 1;
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
@@ -4268,18 +4266,16 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (BTRFS_I(inode)->dummy_inode)
return 0;
- smp_mb();
- if (root->fs_info->closing && is_free_space_inode(root, inode))
+ if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
- trans = btrfs_join_transaction_nolock(root, 1);
+ trans = btrfs_join_transaction_nolock(root);
else
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, inode);
if (nolock)
ret = btrfs_end_transaction_nolock(trans, root);
else
@@ -4303,9 +4299,8 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
if (BTRFS_I(inode)->dummy_inode)
return;
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
@@ -4319,7 +4314,6 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
PTR_ERR(trans));
return;
}
- btrfs_set_trans_block_group(trans, inode);
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
@@ -4418,8 +4412,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
- u64 ref_objectid, u64 objectid,
- u64 alloc_hint, int mode, u64 *index)
+ u64 ref_objectid, u64 objectid, int mode,
+ u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
@@ -4472,8 +4466,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
owner = 0;
else
owner = 1;
- BTRFS_I(inode)->block_group =
- btrfs_find_block_group(root, 0, alloc_hint, owner);
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -4629,15 +4621,13 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
-
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
- BTRFS_I(dir)->block_group, mode, &index);
+ mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
@@ -4649,7 +4639,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- btrfs_set_trans_block_group(trans, inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
@@ -4658,8 +4647,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
}
- btrfs_update_inode_block_group(trans, inode);
- btrfs_update_inode_block_group(trans, dir);
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
@@ -4692,15 +4679,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
-
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
- BTRFS_I(dir)->block_group, mode, &index);
+ mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
@@ -4712,7 +4697,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- btrfs_set_trans_block_group(trans, inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
@@ -4723,8 +4707,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
- btrfs_update_inode_block_group(trans, inode);
- btrfs_update_inode_block_group(trans, dir);
out_unlock:
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
@@ -4771,8 +4753,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_inc_nlink(inode);
inode->i_ctime = CURRENT_TIME;
-
- btrfs_set_trans_block_group(trans, dir);
ihold(inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -4781,7 +4761,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
drop_inode = 1;
} else {
struct dentry *parent = dget_parent(dentry);
- btrfs_update_inode_block_group(trans, dir);
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
btrfs_log_new_name(trans, inode, NULL, parent);
@@ -4818,7 +4797,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
err = btrfs_find_free_ino(root, &objectid);
if (err)
@@ -4826,8 +4804,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
- BTRFS_I(dir)->block_group, S_IFDIR | mode,
- &index);
+ S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fail;
@@ -4841,7 +4818,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
- btrfs_set_trans_block_group(trans, inode);
btrfs_i_size_write(inode, 0);
err = btrfs_update_inode(trans, root, inode);
@@ -4855,8 +4831,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
d_instantiate(dentry, inode);
drop_on_err = 0;
- btrfs_update_inode_block_group(trans, inode);
- btrfs_update_inode_block_group(trans, dir);
out_fail:
nr = trans->blocks_used;
@@ -4989,7 +4963,15 @@ again:
if (!path) {
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /*
+ * Chances are we'll be called again, so go ahead and do
+ * readahead
+ */
+ path->reada = 1;
}
ret = btrfs_lookup_file_extent(trans, root, path,
@@ -5130,8 +5112,10 @@ again:
kunmap(page);
free_extent_map(em);
em = NULL;
+
btrfs_release_path(path);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
+
if (IS_ERR(trans))
return ERR_CAST(trans);
goto again;
@@ -5375,7 +5359,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
}
- trans = btrfs_join_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
@@ -5611,7 +5595,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
* to make sure the current transaction stays open
* while we look for nocow cross refs
*/
- trans = btrfs_join_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
goto must_cow;
@@ -5750,7 +5734,7 @@ again:
BUG_ON(!ordered);
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
err = -ENOMEM;
goto out;
@@ -6500,6 +6484,7 @@ out:
static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_block_rsv *rsv;
int ret;
int err = 0;
struct btrfs_trans_handle *trans;
@@ -6513,28 +6498,80 @@ static int btrfs_truncate(struct inode *inode)
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ /*
+ * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
+ * 3 things going on here
+ *
+ * 1) We need to reserve space for our orphan item and the space to
+ * delete our orphan item. Lord knows we don't want to have a dangling
+ * orphan item because we didn't reserve space to remove it.
+ *
+ * 2) We need to reserve space to update our inode.
+ *
+ * 3) We need to have something to cache all the space that is going to
+ * be free'd up by the truncate operation, but also have some slack
+ * space reserved in case it uses space during the truncate (thank you
+ * very much snapshotting).
+ *
+ * And we need these to all be separate. The fact is we can use a lot of
+ * space doing the truncate, and we have no earthly idea how much space
+ * we will use, so we need the truncate reservation to be separate so it
+ * doesn't end up using space reserved for updating the inode or
+ * removing the orphan item. We also need to be able to stop the
+ * transaction and start a new one, which means we need to be able to
+ * update the inode several times, and we have no way of knowing how
+ * many times that will be, so we can't just reserve 1 item for the
+ * entirety of the operation, so that has to be done separately as well.
+ * Then there is the orphan item, which does indeed need to be held on
+ * to for the whole operation, and we need nobody to touch this reserved
+ * space except the orphan code.
+ *
+ * So that leaves us with
+ *
+ * 1) root->orphan_block_rsv - for the orphan deletion.
+ * 2) rsv - for the truncate reservation, which we will steal from the
+ * transaction reservation.
+ * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
+ * updating the inode.
+ */
+ rsv = btrfs_alloc_block_rsv(root);
+ if (!rsv)
+ return -ENOMEM;
+ btrfs_add_durable_block_rsv(root->fs_info, rsv);
+
+ trans = btrfs_start_transaction(root, 4);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out;
+ }
- btrfs_set_trans_block_group(trans, inode);
+ /*
+ * Reserve space for the truncate process. Truncate should be adding
+ * space, but if there are snapshots it may end up using space.
+ */
+ ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
+ BUG_ON(ret);
ret = btrfs_orphan_add(trans, inode);
if (ret) {
btrfs_end_transaction(trans, root);
- return ret;
+ goto out;
}
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
- /* Now start a transaction for the truncate */
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, inode);
- trans->block_rsv = root->orphan_block_rsv;
+ /*
+ * Ok so we've already migrated our bytes over for the truncate, so here
+ * just reserve the one slot we need for updating the inode.
+ */
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out;
+ }
+ trans->block_rsv = rsv;
/*
* setattr is responsible for setting the ordered_data_close flag,
@@ -6558,24 +6595,17 @@ static int btrfs_truncate(struct inode *inode)
while (1) {
if (!trans) {
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, inode);
- trans->block_rsv = root->orphan_block_rsv;
- }
+ trans = btrfs_start_transaction(root, 3);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out;
+ }
- ret = btrfs_block_rsv_check(trans, root,
- root->orphan_block_rsv, 0, 5);
- if (ret == -EAGAIN) {
- ret = btrfs_commit_transaction(trans, root);
- if (ret)
- return ret;
- trans = NULL;
- continue;
- } else if (ret) {
- err = ret;
- break;
+ ret = btrfs_truncate_reserve_metadata(trans, root,
+ rsv);
+ BUG_ON(ret);
+
+ trans->block_rsv = rsv;
}
ret = btrfs_truncate_inode_items(trans, root, inode,
@@ -6586,6 +6616,7 @@ static int btrfs_truncate(struct inode *inode)
break;
}
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
@@ -6599,6 +6630,7 @@ static int btrfs_truncate(struct inode *inode)
}
if (ret == 0 && inode->i_nlink > 0) {
+ trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, inode);
if (ret)
err = ret;
@@ -6610,15 +6642,20 @@ static int btrfs_truncate(struct inode *inode)
ret = btrfs_orphan_del(NULL, inode);
}
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret && !err)
err = ret;
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+out:
+ btrfs_free_block_rsv(root, rsv);
+
if (ret && !err)
err = ret;
- btrfs_btree_balance_dirty(root, nr);
return err;
}
@@ -6627,15 +6664,14 @@ static int btrfs_truncate(struct inode *inode)
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root,
- u64 new_dirid, u64 alloc_hint)
+ struct btrfs_root *new_root, u64 new_dirid)
{
struct inode *inode;
int err;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
- new_dirid, alloc_hint, S_IFDIR | 0700, &index);
+ new_dirid, S_IFDIR | 0700, &index);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &btrfs_dir_inode_operations;
@@ -6748,21 +6784,6 @@ void btrfs_destroy_inode(struct inode *inode)
spin_unlock(&root->fs_info->ordered_extent_lock);
}
- if (root == root->fs_info->tree_root) {
- struct btrfs_block_group_cache *block_group;
-
- block_group = btrfs_lookup_block_group(root->fs_info,
- BTRFS_I(inode)->block_group);
- if (block_group && block_group->inode == inode) {
- spin_lock(&block_group->lock);
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- btrfs_put_block_group(block_group);
- } else if (block_group) {
- btrfs_put_block_group(block_group);
- }
- }
-
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
@@ -6948,8 +6969,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_notrans;
}
- btrfs_set_trans_block_group(trans, new_dir);
-
if (dest != root)
btrfs_record_root_in_trans(trans, dest);
@@ -7131,16 +7150,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, dir);
-
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
- BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
- &index);
+ S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
@@ -7152,7 +7168,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- btrfs_set_trans_block_group(trans, inode);
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
@@ -7163,8 +7178,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
- btrfs_update_inode_block_group(trans, inode);
- btrfs_update_inode_block_group(trans, dir);
if (drop_inode)
goto out_unlock;
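The three-pool scheme laid out in the btrfs_truncate() comment above is easier to see in isolation. Below is a minimal userspace sketch of the idea, not kernel code: the names (rsv, reserve, release) are invented, and the byte counts stand in for the metadata items the real reservations cover. The point is the differing lifetimes: the orphan reservation spans the whole operation, while the truncate and inode-update reservations are refilled each time the loop ends one transaction and starts another.

    /* Illustrative sketch of the three-pool reservation lifetimes;
     * all names are hypothetical, not kernel API. */
    #include <stdio.h>

    struct rsv { long bytes; };

    static void reserve(struct rsv *r, long n) { r->bytes += n; }
    static void release(struct rsv *r)         { r->bytes = 0; }

    int main(void)
    {
        struct rsv orphan = {0};    /* held across the whole operation  */
        struct rsv trunc  = {0};    /* refilled per transaction          */
        struct rsv inode_upd = {0}; /* one item's worth, per transaction */
        int txns = 3;

        reserve(&orphan, 1);             /* orphan item + its removal */
        while (txns--) {                 /* one pass == one transaction */
            reserve(&trunc, 4);          /* slack for COW during truncate */
            reserve(&inode_upd, 1);      /* slot to update the inode */
            printf("txn: trunc=%ld inode=%ld orphan=%ld\n",
                   trunc.bytes, inode_upd.bytes, orphan.bytes);
            release(&trunc);             /* per-transaction pools drain */
            release(&inode_upd);
        }
        release(&orphan);                /* orphan space freed only now */
        return 0;
    }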
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 85e818ce00c5..b793d112d1f6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
- trans = btrfs_join_transaction(root, 1);
+ trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
ret = btrfs_update_inode(trans, root, inode);
@@ -414,8 +414,7 @@ static noinline int create_subvol(struct btrfs_root *root,
btrfs_record_root_in_trans(trans, new_root);
- ret = btrfs_create_subvol_root(trans, new_root, new_dirid,
- BTRFS_I(dir)->block_group);
+ ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
/*
* insert the directory item
*/
@@ -707,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root,
struct btrfs_file_extent_item *extent;
int type;
int ret;
+ u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- min_key.objectid = inode->i_ino;
+ min_key.objectid = ino;
min_key.type = BTRFS_EXTENT_DATA_KEY;
min_key.offset = *off;
- max_key.objectid = inode->i_ino;
+ max_key.objectid = ino;
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
@@ -727,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root,
path, 0, newer_than);
if (ret != 0)
goto none;
- if (min_key.objectid != inode->i_ino)
+ if (min_key.objectid != ino)
goto none;
if (min_key.type != BTRFS_EXTENT_DATA_KEY)
goto none;
@@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
{
- struct btrfs_ioctl_fs_info_args fi_args;
+ struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
struct btrfs_device *next;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- fi_args.num_devices = fs_devices->num_devices;
- fi_args.max_id = 0;
- memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+ fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
+ if (!fi_args)
+ return -ENOMEM;
+
+ fi_args->num_devices = fs_devices->num_devices;
+ memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
- if (device->devid > fi_args.max_id)
- fi_args.max_id = device->devid;
+ if (device->devid > fi_args->max_id)
+ fi_args->max_id = device->devid;
}
mutex_unlock(&fs_devices->device_list_mutex);
- if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
- return -EFAULT;
+ if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
+ ret = -EFAULT;
- return 0;
+ kfree(fi_args);
+ return ret;
}
static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
@@ -2489,12 +2494,10 @@ static long btrfs_ioctl_trans_start(struct file *file)
if (ret)
goto out;
- mutex_lock(&root->fs_info->trans_mutex);
- root->fs_info->open_ioctl_trans++;
- mutex_unlock(&root->fs_info->trans_mutex);
+ atomic_inc(&root->fs_info->open_ioctl_trans);
ret = -ENOMEM;
- trans = btrfs_start_ioctl_transaction(root, 0);
+ trans = btrfs_start_ioctl_transaction(root);
if (IS_ERR(trans))
goto out_drop;
@@ -2502,9 +2505,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
return 0;
out_drop:
- mutex_lock(&root->fs_info->trans_mutex);
- root->fs_info->open_ioctl_trans--;
- mutex_unlock(&root->fs_info->trans_mutex);
+ atomic_dec(&root->fs_info->open_ioctl_trans);
mnt_drop_write(file->f_path.mnt);
out:
return ret;
@@ -2738,9 +2739,7 @@ long btrfs_ioctl_trans_end(struct file *file)
btrfs_end_transaction(trans, root);
- mutex_lock(&root->fs_info->trans_mutex);
- root->fs_info->open_ioctl_trans--;
- mutex_unlock(&root->fs_info->trans_mutex);
+ atomic_dec(&root->fs_info->open_ioctl_trans);
mnt_drop_write(file->f_path.mnt);
return 0;
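The ioctl_trans hunks above replace a counter guarded by trans_mutex with an atomic_t, which suffices because the counter is only ever incremented, decremented, and read. A C11 equivalent of that before/after shape, with hypothetical names:

    /* Sketch of the atomic-counter conversion, in C11 stdatomic. */
    #include <stdatomic.h>

    static atomic_int open_ioctl_trans;   /* was: int + mutex */

    static void trans_start(void) { atomic_fetch_add(&open_ioctl_trans, 1); }
    static void trans_end(void)   { atomic_fetch_sub(&open_ioctl_trans, 1); }
    static int  trans_open(void)  { return atomic_load(&open_ioctl_trans); }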
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ca38eca70af0..b1ef27cc673b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -677,6 +677,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
err = -ENOMEM;
goto out;
}
+ path1->reada = 1;
+ path2->reada = 2;
node = alloc_backref_node(cache);
if (!node) {
@@ -1999,6 +2001,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->reada = 1;
reloc_root = root->reloc_root;
root_item = &reloc_root->root_item;
@@ -2139,10 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err)
u64 num_bytes = 0;
int ret;
- mutex_lock(&root->fs_info->trans_mutex);
+ spin_lock(&root->fs_info->trans_lock);
rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
rc->merging_rsv_size += rc->nodes_relocated * 2;
- mutex_unlock(&root->fs_info->trans_mutex);
+ spin_unlock(&root->fs_info->trans_lock);
again:
if (!err) {
num_bytes = rc->merging_rsv_size;
@@ -2152,7 +2155,7 @@ again:
err = ret;
}
- trans = btrfs_join_transaction(rc->extent_root, 1);
+ trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
if (!err)
btrfs_block_rsv_release(rc->extent_root,
@@ -2211,9 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc)
int ret;
again:
root = rc->extent_root;
- mutex_lock(&root->fs_info->trans_mutex);
+ spin_lock(&root->fs_info->trans_lock);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- mutex_unlock(&root->fs_info->trans_mutex);
+ spin_unlock(&root->fs_info->trans_lock);
while (!list_empty(&reloc_roots)) {
found = 1;
@@ -3236,7 +3239,7 @@ truncate:
goto out;
}
- trans = btrfs_join_transaction(root, 0);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_free_path(path);
ret = PTR_ERR(trans);
@@ -3300,6 +3303,7 @@ static int find_data_references(struct reloc_control *rc,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->reada = 1;
root = read_fs_root(rc->extent_root->fs_info, ref_root);
if (IS_ERR(root)) {
@@ -3586,17 +3590,17 @@ next:
static void set_reloc_control(struct reloc_control *rc)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- mutex_lock(&fs_info->trans_mutex);
+ spin_lock(&fs_info->trans_lock);
fs_info->reloc_ctl = rc;
- mutex_unlock(&fs_info->trans_mutex);
+ spin_unlock(&fs_info->trans_lock);
}
static void unset_reloc_control(struct reloc_control *rc)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- mutex_lock(&fs_info->trans_mutex);
+ spin_lock(&fs_info->trans_lock);
fs_info->reloc_ctl = NULL;
- mutex_unlock(&fs_info->trans_mutex);
+ spin_unlock(&fs_info->trans_lock);
}
static int check_extent_flags(u64 flags)
@@ -3645,7 +3649,7 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->create_reloc_tree = 1;
set_reloc_control(rc);
- trans = btrfs_join_transaction(rc->extent_root, 1);
+ trans = btrfs_join_transaction(rc->extent_root);
BUG_ON(IS_ERR(trans));
btrfs_commit_transaction(trans, rc->extent_root);
return 0;
@@ -3668,6 +3672,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->reada = 1;
ret = prepare_to_relocate(rc);
if (ret) {
@@ -3834,7 +3839,7 @@ restart:
btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
/* get rid of pinned extents */
- trans = btrfs_join_transaction(rc->extent_root, 1);
+ trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans))
err = PTR_ERR(trans);
else
@@ -4093,6 +4098,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->reada = -1;
key.objectid = BTRFS_TREE_RELOC_OBJECTID;
key.type = BTRFS_ROOT_ITEM_KEY;
@@ -4159,7 +4165,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
set_reloc_control(rc);
- trans = btrfs_join_transaction(rc->extent_root, 1);
+ trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
unset_reloc_control(rc);
err = PTR_ERR(trans);
@@ -4193,7 +4199,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
unset_reloc_control(rc);
- trans = btrfs_join_transaction(rc->extent_root, 1);
+ trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans))
err = PTR_ERR(trans);
else
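The relocation hunks swap trans_mutex for the new trans_lock spinlock wherever the critical section shrinks to a single assignment or list splice, as in set_reloc_control()/unset_reloc_control(). A pthread sketch of that pattern, illustrative names only:

    /* Minimal sketch of the one-store critical section guarded by a
     * spinlock; reloc_ctl stands in for fs_info->reloc_ctl. */
    #include <pthread.h>
    #include <stddef.h>

    static pthread_spinlock_t trans_lock;
    static void *reloc_ctl;

    static void init_locks(void)
    {
        pthread_spin_init(&trans_lock, PTHREAD_PROCESS_PRIVATE);
    }

    static void set_reloc_control(void *rc)
    {
        pthread_spin_lock(&trans_lock);
        reloc_ctl = rc;                /* critical section: one store */
        pthread_spin_unlock(&trans_lock);
    }

    static void unset_reloc_control(void)
    {
        set_reloc_control(NULL);
    }

A spinlock fits here because the lock is never held across anything that sleeps; a mutex would only add overhead.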
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..a8d03d5efb5d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
* Boston, MA 021110-1307, USA.
*/
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
@@ -117,33 +111,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
}
}
+static void scrub_free_bio(struct bio *bio)
+{
+ int i;
+ struct page *last_page = NULL;
+
+ if (!bio)
+ return;
+
+ for (i = 0; i < bio->bi_vcnt; ++i) {
+ if (bio->bi_io_vec[i].bv_page == last_page)
+ continue;
+ last_page = bio->bi_io_vec[i].bv_page;
+ __free_page(last_page);
+ }
+ bio_put(bio);
+}
+
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
int i;
- int j;
- struct page *last_page;
if (!sdev)
return;
for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
struct scrub_bio *sbio = sdev->bios[i];
- struct bio *bio;
if (!sbio)
break;
- bio = sbio->bio;
- if (bio) {
- last_page = NULL;
- for (j = 0; j < bio->bi_vcnt; ++j) {
- if (bio->bi_io_vec[j].bv_page == last_page)
- continue;
- last_page = bio->bi_io_vec[j].bv_page;
- __free_page(last_page);
- }
- bio_put(bio);
- }
+ scrub_free_bio(sbio->bio);
kfree(sbio);
}
@@ -156,8 +154,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
struct scrub_dev *sdev;
int i;
- int j;
- int ret;
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +161,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
goto nomem;
sdev->dev = dev;
for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
- struct bio *bio;
struct scrub_bio *sbio;
sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +168,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
goto nomem;
sdev->bios[i] = sbio;
- bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
- if (!bio)
- goto nomem;
-
sbio->index = i;
sbio->sdev = sdev;
- sbio->bio = bio;
sbio->count = 0;
sbio->work.func = scrub_checksum;
- bio->bi_private = sdev->bios[i];
- bio->bi_end_io = scrub_bio_end_io;
- bio->bi_sector = 0;
- bio->bi_bdev = dev->bdev;
- bio->bi_size = 0;
-
- for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
- struct page *page;
- page = alloc_page(GFP_NOFS);
- if (!page)
- goto nomem;
-
- ret = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (!ret)
- goto nomem;
- }
- WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
if (i != SCRUB_BIOS_PER_DEV-1)
sdev->bios[i]->next_free = i + 1;
@@ -369,9 +342,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
int ret;
DECLARE_COMPLETION_ONSTACK(complete);
- /* we are going to wait on this IO */
- rw |= REQ_SYNC;
-
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = bdev;
bio->bi_sector = sector;
@@ -380,6 +350,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
bio->bi_private = &complete;
submit_bio(rw, bio);
+ /* this will also unplug the queue */
wait_for_completion(&complete);
ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -394,6 +365,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
sbio->err = err;
+ sbio->bio = bio;
btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}
@@ -453,6 +425,8 @@ static void scrub_checksum(struct btrfs_work *work)
}
out:
+ scrub_free_bio(sbio->bio);
+ sbio->bio = NULL;
spin_lock(&sdev->list_lock);
sbio->next_free = sdev->first_free;
sdev->first_free = sbio->index;
@@ -583,25 +557,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
static int scrub_submit(struct scrub_dev *sdev)
{
struct scrub_bio *sbio;
+ struct bio *bio;
+ int i;
if (sdev->curr == -1)
return 0;
sbio = sdev->bios[sdev->curr];
- sbio->bio->bi_sector = sbio->physical >> 9;
- sbio->bio->bi_size = sbio->count * PAGE_SIZE;
- sbio->bio->bi_next = NULL;
- sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
- sbio->bio->bi_comp_cpu = -1;
- sbio->bio->bi_bdev = sdev->dev->bdev;
+ bio = bio_alloc(GFP_NOFS, sbio->count);
+ if (!bio)
+ goto nomem;
+
+ bio->bi_private = sbio;
+ bio->bi_end_io = scrub_bio_end_io;
+ bio->bi_bdev = sdev->dev->bdev;
+ bio->bi_sector = sbio->physical >> 9;
+
+ for (i = 0; i < sbio->count; ++i) {
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ goto nomem;
+
+ ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (!ret) {
+ __free_page(page);
+ goto nomem;
+ }
+ }
+
sbio->err = 0;
sdev->curr = -1;
atomic_inc(&sdev->in_flight);
- submit_bio(0, sbio->bio);
+ submit_bio(READ, bio);
return 0;
+
+nomem:
+ scrub_free_bio(bio);
+
+ return -ENOMEM;
}
static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +632,11 @@ again:
sbio->logical = logical;
} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
sbio->logical + sbio->count * PAGE_SIZE != logical) {
- scrub_submit(sdev);
+ int ret;
+
+ ret = scrub_submit(sdev);
+ if (ret)
+ return ret;
goto again;
}
sbio->spag[sbio->count].flags = flags;
@@ -645,8 +648,13 @@ again:
memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
}
++sbio->count;
- if (sbio->count == SCRUB_PAGES_PER_BIO || force)
- scrub_submit(sdev);
+ if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
+ int ret;
+
+ ret = scrub_submit(sdev);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -727,6 +735,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
+ struct blk_plug plug;
u64 flags;
int ret;
int slot;
@@ -789,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
-
- l = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid != logical) {
- ret = btrfs_previous_item(root, path, 0,
- BTRFS_EXTENT_ITEM_KEY);
- if (ret < 0)
- goto out;
- }
+ goto out_noplug;
+ /*
+ * we might miss half an extent here, but that doesn't matter,
+ * as it's only the prefetch
+ */
while (1) {
l = path->nodes[0];
slot = path->slots[0];
@@ -809,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
if (ret == 0)
continue;
if (ret < 0)
- goto out;
+ goto out_noplug;
break;
}
@@ -831,6 +834,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
* the scrub. This might currently (crc32) end up to be about 1MB
*/
start_stripe = 0;
+ blk_start_plug(&plug);
again:
logical = base + offset + start_stripe * increment;
for (i = start_stripe; i < nstripes; ++i) {
@@ -890,15 +894,20 @@ again:
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
-
- l = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid != logical) {
+ if (ret > 0) {
ret = btrfs_previous_item(root, path, 0,
BTRFS_EXTENT_ITEM_KEY);
if (ret < 0)
goto out;
+ if (ret > 0) {
+ /* there's no smaller item, so stick with the
+ * larger one */
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(NULL, root, &key,
+ path, 0, 0);
+ if (ret < 0)
+ goto out;
+ }
}
while (1) {
@@ -972,6 +981,8 @@ next:
scrub_submit(sdev);
out:
+ blk_finish_plug(&plug);
+out_noplug:
btrfs_free_path(path);
return ret < 0 ? ret : 0;
}
@@ -1047,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto out;
- ret = 0;
+ break;
+ if (ret > 0) {
+ if (path->slots[0] >=
+ btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret)
+ break;
+ }
+ }
l = path->nodes[0];
slot = path->slots[0];
@@ -1058,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
if (found_key.objectid != sdev->dev->devid)
break;
- if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+ if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
break;
if (found_key.offset >= end)
@@ -1087,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
if (!cache) {
ret = -ENOENT;
- goto out;
+ break;
}
ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
chunk_offset, length);
@@ -1099,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
btrfs_release_path(path);
}
-out:
btrfs_free_path(path);
- return ret;
+
+ /*
+ * ret can still be 1 from search_slot or next_leaf,
+ * that's not an error
+ */
+ return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1138,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->scrub_lock);
- if (fs_info->scrub_workers_refcnt == 0)
+ if (fs_info->scrub_workers_refcnt == 0) {
+ btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+ fs_info->thread_pool_size, &fs_info->generic_worker);
+ fs_info->scrub_workers.idle_thresh = 4;
btrfs_start_workers(&fs_info->scrub_workers, 1);
+ }
++fs_info->scrub_workers_refcnt;
mutex_unlock(&fs_info->scrub_lock);
@@ -1166,7 +1192,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
int ret;
struct btrfs_device *dev;
- if (root->fs_info->closing)
+ if (btrfs_fs_closing(root->fs_info))
return -EINVAL;
/*
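The scrub rewrite stops pre-allocating one bio (plus pages) per scrub_bio at setup time and instead builds the bio in scrub_submit(), unwinding everything on ENOMEM. The shape of that allocate-at-submit pattern, as a self-contained userspace sketch with invented names:

    /* Sketch of allocate-at-submit with full unwinding on failure. */
    #include <stdlib.h>

    struct buf_list { void *pages[16]; int count; };

    static void free_bufs(struct buf_list *b)
    {
        for (int i = 0; i < b->count; i++)
            free(b->pages[i]);
        free(b);
    }

    static struct buf_list *alloc_bufs(int n)
    {
        struct buf_list *b = calloc(1, sizeof(*b));
        if (!b)
            return NULL;
        for (int i = 0; i < n; i++) {
            b->pages[i] = malloc(4096);
            if (!b->pages[i]) {
                free_bufs(b);      /* frees the i pages added so far */
                return NULL;
            }
            b->count++;
        }
        return b;
    }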
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9b2e7e5bc3ef..0bb4ebbb71b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -161,7 +161,8 @@ enum {
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
- Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err,
+ Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
+ Opt_inode_cache, Opt_err,
};
static match_table_t tokens = {
@@ -193,6 +194,7 @@ static match_table_t tokens = {
{Opt_enospc_debug, "enospc_debug"},
{Opt_subvolrootid, "subvolrootid=%d"},
{Opt_defrag, "autodefrag"},
+ {Opt_inode_cache, "inode_cache"},
{Opt_err, NULL},
};
@@ -361,6 +363,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
printk(KERN_INFO "btrfs: enabling disk space caching\n");
btrfs_set_opt(info->mount_opt, SPACE_CACHE);
break;
+ case Opt_inode_cache:
+ printk(KERN_INFO "btrfs: enabling inode map caching\n");
+ btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
+ break;
case Opt_clear_cache:
printk(KERN_INFO "btrfs: force clearing of disk cache\n");
btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -819,7 +825,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags;
+ s->s_flags = flags | MS_NOSEC;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
error = btrfs_fill_super(s, fs_devices, data,
flags & MS_SILENT ? 1 : 0);
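The super.c hunks extend the mount-option token table with Opt_inode_cache. For readers unfamiliar with the table-driven style, here is a minimal sketch of how such a table maps strings to tokens; match_opt() is a stand-in, not the kernel's match_token():

    /* Hypothetical token-table lookup in the style of btrfs_parse_options. */
    #include <stdio.h>
    #include <string.h>

    enum { Opt_inode_cache, Opt_space_cache, Opt_err };

    static const struct { int token; const char *pattern; } tokens[] = {
        { Opt_inode_cache, "inode_cache" },
        { Opt_space_cache, "space_cache" },
        { Opt_err, NULL },
    };

    static int match_opt(const char *s)
    {
        for (int i = 0; tokens[i].pattern; i++)
            if (!strcmp(s, tokens[i].pattern))
                return tokens[i].token;
        return Opt_err;
    }

    int main(void)
    {
        if (match_opt("inode_cache") == Opt_inode_cache)
            printf("btrfs: enabling inode map caching\n");
        return 0;
    }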
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dc80f7156923..2b3590b9fe98 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -35,6 +35,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(atomic_read(&transaction->use_count) == 0);
if (atomic_dec_and_test(&transaction->use_count)) {
+ BUG_ON(!list_empty(&transaction->list));
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
@@ -49,46 +50,72 @@ static noinline void switch_commit_root(struct btrfs_root *root)
/*
* either allocate a new transaction or hop into the existing one
*/
-static noinline int join_transaction(struct btrfs_root *root)
+static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
struct btrfs_transaction *cur_trans;
+
+ spin_lock(&root->fs_info->trans_lock);
+ if (root->fs_info->trans_no_join) {
+ if (!nofail) {
+ spin_unlock(&root->fs_info->trans_lock);
+ return -EBUSY;
+ }
+ }
+
cur_trans = root->fs_info->running_transaction;
- if (!cur_trans) {
- cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
- GFP_NOFS);
- if (!cur_trans)
- return -ENOMEM;
- root->fs_info->generation++;
- atomic_set(&cur_trans->num_writers, 1);
- cur_trans->num_joined = 0;
- cur_trans->transid = root->fs_info->generation;
- init_waitqueue_head(&cur_trans->writer_wait);
- init_waitqueue_head(&cur_trans->commit_wait);
- cur_trans->in_commit = 0;
- cur_trans->blocked = 0;
- atomic_set(&cur_trans->use_count, 1);
- cur_trans->commit_done = 0;
- cur_trans->start_time = get_seconds();
-
- cur_trans->delayed_refs.root = RB_ROOT;
- cur_trans->delayed_refs.num_entries = 0;
- cur_trans->delayed_refs.num_heads_ready = 0;
- cur_trans->delayed_refs.num_heads = 0;
- cur_trans->delayed_refs.flushing = 0;
- cur_trans->delayed_refs.run_delayed_start = 0;
- spin_lock_init(&cur_trans->delayed_refs.lock);
-
- INIT_LIST_HEAD(&cur_trans->pending_snapshots);
- list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
- extent_io_tree_init(&cur_trans->dirty_pages,
- root->fs_info->btree_inode->i_mapping);
- spin_lock(&root->fs_info->new_trans_lock);
- root->fs_info->running_transaction = cur_trans;
- spin_unlock(&root->fs_info->new_trans_lock);
- } else {
+ if (cur_trans) {
+ atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
+ spin_unlock(&root->fs_info->trans_lock);
+ return 0;
}
+ spin_unlock(&root->fs_info->trans_lock);
+
+ cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
+ if (!cur_trans)
+ return -ENOMEM;
+ spin_lock(&root->fs_info->trans_lock);
+ if (root->fs_info->running_transaction) {
+ kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+ cur_trans = root->fs_info->running_transaction;
+ atomic_inc(&cur_trans->use_count);
+ atomic_inc(&cur_trans->num_writers);
+ cur_trans->num_joined++;
+ spin_unlock(&root->fs_info->trans_lock);
+ return 0;
+ }
+ atomic_set(&cur_trans->num_writers, 1);
+ cur_trans->num_joined = 0;
+ init_waitqueue_head(&cur_trans->writer_wait);
+ init_waitqueue_head(&cur_trans->commit_wait);
+ cur_trans->in_commit = 0;
+ cur_trans->blocked = 0;
+ /*
+ * One for this trans handle, one so it will live on until we
+ * commit the transaction.
+ */
+ atomic_set(&cur_trans->use_count, 2);
+ cur_trans->commit_done = 0;
+ cur_trans->start_time = get_seconds();
+
+ cur_trans->delayed_refs.root = RB_ROOT;
+ cur_trans->delayed_refs.num_entries = 0;
+ cur_trans->delayed_refs.num_heads_ready = 0;
+ cur_trans->delayed_refs.num_heads = 0;
+ cur_trans->delayed_refs.flushing = 0;
+ cur_trans->delayed_refs.run_delayed_start = 0;
+ spin_lock_init(&cur_trans->commit_lock);
+ spin_lock_init(&cur_trans->delayed_refs.lock);
+
+ INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+ list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+ extent_io_tree_init(&cur_trans->dirty_pages,
+ root->fs_info->btree_inode->i_mapping);
+ root->fs_info->generation++;
+ cur_trans->transid = root->fs_info->generation;
+ root->fs_info->running_transaction = cur_trans;
+ spin_unlock(&root->fs_info->trans_lock);
return 0;
}
@@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root)
* to make sure the old root from before we joined the transaction is deleted
* when the transaction commits
*/
-static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
{
if (root->ref_cows && root->last_trans < trans->transid) {
WARN_ON(root == root->fs_info->extent_root);
WARN_ON(root->commit_root != root->node);
+ spin_lock(&root->fs_info->fs_roots_radix_lock);
+ if (root->last_trans == trans->transid) {
+ spin_unlock(&root->fs_info->fs_roots_radix_lock);
+ return 0;
+ }
+ root->last_trans = trans->transid;
radix_tree_tag_set(&root->fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
- root->last_trans = trans->transid;
+ spin_unlock(&root->fs_info->fs_roots_radix_lock);
btrfs_init_reloc_root(trans, root);
}
return 0;
}
-int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- if (!root->ref_cows)
- return 0;
-
- mutex_lock(&root->fs_info->trans_mutex);
- if (root->last_trans == trans->transid) {
- mutex_unlock(&root->fs_info->trans_mutex);
- return 0;
- }
-
- record_root_in_trans(trans, root);
- mutex_unlock(&root->fs_info->trans_mutex);
- return 0;
-}
-
/* wait for commit against the current transaction to become unblocked
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root)
{
struct btrfs_transaction *cur_trans;
+ spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
DEFINE_WAIT(wait);
atomic_inc(&cur_trans->use_count);
+ spin_unlock(&root->fs_info->trans_lock);
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (!cur_trans->blocked)
break;
- mutex_unlock(&root->fs_info->trans_mutex);
schedule();
- mutex_lock(&root->fs_info->trans_mutex);
}
finish_wait(&root->fs_info->transaction_wait, &wait);
put_transaction(cur_trans);
+ } else {
+ spin_unlock(&root->fs_info->trans_lock);
}
}
@@ -167,10 +185,16 @@ enum btrfs_trans_type {
static int may_wait_transaction(struct btrfs_root *root, int type)
{
- if (!root->fs_info->log_root_recovering &&
- ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
- type == TRANS_USERSPACE))
+ if (root->fs_info->log_root_recovering)
+ return 0;
+
+ if (type == TRANS_USERSPACE)
return 1;
+
+ if (type == TRANS_START &&
+ !atomic_read(&root->fs_info->open_ioctl_trans))
+ return 1;
+
return 0;
}
@@ -184,36 +208,44 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
return ERR_PTR(-EROFS);
+
+ if (current->journal_info) {
+ WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
+ h = current->journal_info;
+ h->use_count++;
+ h->orig_rsv = h->block_rsv;
+ h->block_rsv = NULL;
+ goto got_it;
+ }
again:
h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
if (!h)
return ERR_PTR(-ENOMEM);
- if (type != TRANS_JOIN_NOLOCK)
- mutex_lock(&root->fs_info->trans_mutex);
if (may_wait_transaction(root, type))
wait_current_trans(root);
- ret = join_transaction(root);
+ do {
+ ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
+ if (ret == -EBUSY)
+ wait_current_trans(root);
+ } while (ret == -EBUSY);
+
if (ret < 0) {
kmem_cache_free(btrfs_trans_handle_cachep, h);
- if (type != TRANS_JOIN_NOLOCK)
- mutex_unlock(&root->fs_info->trans_mutex);
return ERR_PTR(ret);
}
cur_trans = root->fs_info->running_transaction;
- atomic_inc(&cur_trans->use_count);
- if (type != TRANS_JOIN_NOLOCK)
- mutex_unlock(&root->fs_info->trans_mutex);
h->transid = cur_trans->transid;
h->transaction = cur_trans;
h->blocks_used = 0;
- h->block_group = 0;
h->bytes_reserved = 0;
h->delayed_ref_updates = 0;
+ h->use_count = 1;
h->block_rsv = NULL;
+ h->orig_rsv = NULL;
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -241,11 +273,8 @@ again:
}
}
- if (type != TRANS_JOIN_NOLOCK)
- mutex_lock(&root->fs_info->trans_mutex);
- record_root_in_trans(h, root);
- if (type != TRANS_JOIN_NOLOCK)
- mutex_unlock(&root->fs_info->trans_mutex);
+got_it:
+ btrfs_record_root_in_trans(h, root);
if (!current->journal_info && type != TRANS_USERSPACE)
current->journal_info = h;
@@ -257,22 +286,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
{
return start_transaction(root, num_items, TRANS_START);
}
-struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
- int num_blocks)
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN);
}
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
- int num_blocks)
+struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}
-struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
- int num_blocks)
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
- return start_transaction(r, 0, TRANS_USERSPACE);
+ return start_transaction(root, 0, TRANS_USERSPACE);
}
/* wait for a transaction commit to be fully complete */
@@ -280,17 +306,13 @@ static noinline int wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
{
DEFINE_WAIT(wait);
- mutex_lock(&root->fs_info->trans_mutex);
while (!commit->commit_done) {
prepare_to_wait(&commit->commit_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (commit->commit_done)
break;
- mutex_unlock(&root->fs_info->trans_mutex);
schedule();
- mutex_lock(&root->fs_info->trans_mutex);
}
- mutex_unlock(&root->fs_info->trans_mutex);
finish_wait(&commit->commit_wait, &wait);
return 0;
}
@@ -300,59 +322,56 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
struct btrfs_transaction *cur_trans = NULL, *t;
int ret;
- mutex_lock(&root->fs_info->trans_mutex);
-
ret = 0;
if (transid) {
if (transid <= root->fs_info->last_trans_committed)
- goto out_unlock;
+ goto out;
/* find specified transaction */
+ spin_lock(&root->fs_info->trans_lock);
list_for_each_entry(t, &root->fs_info->trans_list, list) {
if (t->transid == transid) {
cur_trans = t;
+ atomic_inc(&cur_trans->use_count);
break;
}
if (t->transid > transid)
break;
}
+ spin_unlock(&root->fs_info->trans_lock);
ret = -EINVAL;
if (!cur_trans)
- goto out_unlock; /* bad transid */
+ goto out; /* bad transid */
} else {
/* find newest transaction that is committing | committed */
+ spin_lock(&root->fs_info->trans_lock);
list_for_each_entry_reverse(t, &root->fs_info->trans_list,
list) {
if (t->in_commit) {
if (t->commit_done)
- goto out_unlock;
+ break;
cur_trans = t;
+ atomic_inc(&cur_trans->use_count);
break;
}
}
+ spin_unlock(&root->fs_info->trans_lock);
if (!cur_trans)
- goto out_unlock; /* nothing committing|committed */
+ goto out; /* nothing committing|committed */
}
- atomic_inc(&cur_trans->use_count);
- mutex_unlock(&root->fs_info->trans_mutex);
-
wait_for_commit(root, cur_trans);
- mutex_lock(&root->fs_info->trans_mutex);
put_transaction(cur_trans);
ret = 0;
-out_unlock:
- mutex_unlock(&root->fs_info->trans_mutex);
+out:
return ret;
}
void btrfs_throttle(struct btrfs_root *root)
{
- mutex_lock(&root->fs_info->trans_mutex);
- if (!root->fs_info->open_ioctl_trans)
+ if (!atomic_read(&root->fs_info->open_ioctl_trans))
wait_current_trans(root);
- mutex_unlock(&root->fs_info->trans_mutex);
}
static int should_end_transaction(struct btrfs_trans_handle *trans,
@@ -370,6 +389,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans = trans->transaction;
int updates;
+ smp_mb();
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
return 1;
@@ -388,6 +408,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
+ if (--trans->use_count) {
+ trans->block_rsv = trans->orig_rsv;
+ return 0;
+ }
+
while (count < 4) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
@@ -410,9 +435,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_metadata(trans, root);
- if (lock && !root->fs_info->open_ioctl_trans &&
- should_end_transaction(trans, root))
+ if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
+ should_end_transaction(trans, root)) {
trans->transaction->blocked = 1;
+ smp_wmb();
+ }
if (lock && cur_trans->blocked && !cur_trans->in_commit) {
if (throttle)
@@ -703,9 +730,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
*/
int btrfs_add_dead_root(struct btrfs_root *root)
{
- mutex_lock(&root->fs_info->trans_mutex);
+ spin_lock(&root->fs_info->trans_lock);
list_add(&root->root_list, &root->fs_info->dead_roots);
- mutex_unlock(&root->fs_info->trans_mutex);
+ spin_unlock(&root->fs_info->trans_lock);
return 0;
}
@@ -721,6 +748,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
int ret;
int err = 0;
+ spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
(void **)gang, 0,
@@ -733,6 +761,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
btrfs_free_log(trans, root);
btrfs_update_reloc_root(trans, root);
@@ -753,10 +782,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
err = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key,
&root->root_item);
+ spin_lock(&fs_info->fs_roots_radix_lock);
if (err)
break;
}
}
+ spin_unlock(&fs_info->fs_roots_radix_lock);
return err;
}
@@ -786,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
btrfs_btree_balance_dirty(info->tree_root, nr);
cond_resched();
- if (root->fs_info->closing || ret != -EAGAIN)
+ if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
break;
}
root->defrag_running = 0;
@@ -851,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent = dget_parent(dentry);
parent_inode = parent->d_inode;
parent_root = BTRFS_I(parent_inode)->root;
- record_root_in_trans(trans, parent_root);
+ btrfs_record_root_in_trans(trans, parent_root);
/*
* insert the directory item
@@ -869,7 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, parent_root, parent_inode);
BUG_ON(ret);
- record_root_in_trans(trans, root);
+ btrfs_record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
btrfs_check_and_init_root_item(new_root_item);
@@ -967,20 +998,20 @@ static void update_super_roots(struct btrfs_root *root)
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
int ret = 0;
- spin_lock(&info->new_trans_lock);
+ spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->in_commit;
- spin_unlock(&info->new_trans_lock);
+ spin_unlock(&info->trans_lock);
return ret;
}
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
int ret = 0;
- spin_lock(&info->new_trans_lock);
+ spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->blocked;
- spin_unlock(&info->new_trans_lock);
+ spin_unlock(&info->trans_lock);
return ret;
}
@@ -1004,9 +1035,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
&wait);
break;
}
- mutex_unlock(&root->fs_info->trans_mutex);
schedule();
- mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
}
}
@@ -1032,9 +1061,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
&wait);
break;
}
- mutex_unlock(&root->fs_info->trans_mutex);
schedule();
- mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&root->fs_info->transaction_wait,
&wait);
}
@@ -1072,7 +1099,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
INIT_DELAYED_WORK(&ac->work, do_async_commit);
ac->root = root;
- ac->newtrans = btrfs_join_transaction(root, 0);
+ ac->newtrans = btrfs_join_transaction(root);
if (IS_ERR(ac->newtrans)) {
int err = PTR_ERR(ac->newtrans);
kfree(ac);
@@ -1080,23 +1107,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
}
/* take transaction reference */
- mutex_lock(&root->fs_info->trans_mutex);
cur_trans = trans->transaction;
atomic_inc(&cur_trans->use_count);
- mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
schedule_delayed_work(&ac->work, 0);
/* wait for transaction to start and unblock */
- mutex_lock(&root->fs_info->trans_mutex);
if (wait_for_unblock)
wait_current_trans_commit_start_and_unblock(root, cur_trans);
else
wait_current_trans_commit_start(root, cur_trans);
- put_transaction(cur_trans);
- mutex_unlock(&root->fs_info->trans_mutex);
+ if (current->journal_info == trans)
+ current->journal_info = NULL;
+
+ put_transaction(cur_trans);
return 0;
}
@@ -1139,38 +1165,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
ret = btrfs_run_delayed_refs(trans, root, 0);
BUG_ON(ret);
- mutex_lock(&root->fs_info->trans_mutex);
+ spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
+ spin_unlock(&cur_trans->commit_lock);
atomic_inc(&cur_trans->use_count);
- mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
ret = wait_for_commit(root, cur_trans);
BUG_ON(ret);
- mutex_lock(&root->fs_info->trans_mutex);
put_transaction(cur_trans);
- mutex_unlock(&root->fs_info->trans_mutex);
return 0;
}
trans->transaction->in_commit = 1;
trans->transaction->blocked = 1;
+ spin_unlock(&cur_trans->commit_lock);
wake_up(&root->fs_info->transaction_blocked_wait);
+ spin_lock(&root->fs_info->trans_lock);
if (cur_trans->list.prev != &root->fs_info->trans_list) {
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
atomic_inc(&prev_trans->use_count);
- mutex_unlock(&root->fs_info->trans_mutex);
+ spin_unlock(&root->fs_info->trans_lock);
wait_for_commit(root, prev_trans);
- mutex_lock(&root->fs_info->trans_mutex);
put_transaction(prev_trans);
+ } else {
+ spin_unlock(&root->fs_info->trans_lock);
}
+ } else {
+ spin_unlock(&root->fs_info->trans_lock);
}
if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
@@ -1178,12 +1207,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
do {
int snap_pending = 0;
+
joined = cur_trans->num_joined;
if (!list_empty(&trans->transaction->pending_snapshots))
snap_pending = 1;
WARN_ON(cur_trans != trans->transaction);
- mutex_unlock(&root->fs_info->trans_mutex);
if (flush_on_commit || snap_pending) {
btrfs_start_delalloc_inodes(root, 1);
@@ -1206,14 +1235,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
prepare_to_wait(&cur_trans->writer_wait, &wait,
TASK_UNINTERRUPTIBLE);
- smp_mb();
if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);
- mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&cur_trans->writer_wait, &wait);
+ spin_lock(&root->fs_info->trans_lock);
+ root->fs_info->trans_no_join = 1;
+ spin_unlock(&root->fs_info->trans_lock);
} while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
@@ -1258,9 +1288,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_prepare_extent_commit(trans, root);
cur_trans = root->fs_info->running_transaction;
- spin_lock(&root->fs_info->new_trans_lock);
- root->fs_info->running_transaction = NULL;
- spin_unlock(&root->fs_info->new_trans_lock);
btrfs_set_root_node(&root->fs_info->tree_root->root_item,
root->fs_info->tree_root->node);
@@ -1281,10 +1308,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
sizeof(root->fs_info->super_copy));
trans->transaction->blocked = 0;
+ spin_lock(&root->fs_info->trans_lock);
+ root->fs_info->running_transaction = NULL;
+ root->fs_info->trans_no_join = 0;
+ spin_unlock(&root->fs_info->trans_lock);
wake_up(&root->fs_info->transaction_wait);
- mutex_unlock(&root->fs_info->trans_mutex);
ret = btrfs_write_and_wait_transaction(trans, root);
BUG_ON(ret);
write_ctree_super(trans, root, 0);
@@ -1297,22 +1327,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_finish_extent_commit(trans, root);
- mutex_lock(&root->fs_info->trans_mutex);
-
cur_trans->commit_done = 1;
root->fs_info->last_trans_committed = cur_trans->transid;
wake_up(&cur_trans->commit_wait);
+ spin_lock(&root->fs_info->trans_lock);
list_del_init(&cur_trans->list);
+ spin_unlock(&root->fs_info->trans_lock);
+
put_transaction(cur_trans);
put_transaction(cur_trans);
trace_btrfs_transaction_commit(root);
- mutex_unlock(&root->fs_info->trans_mutex);
-
btrfs_scrub_continue(root);
if (current->journal_info == trans)
@@ -1334,9 +1363,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
- mutex_lock(&fs_info->trans_mutex);
+ spin_lock(&fs_info->trans_lock);
list_splice_init(&fs_info->dead_roots, &list);
- mutex_unlock(&fs_info->trans_mutex);
+ spin_unlock(&fs_info->trans_lock);
while (!list_empty(&list)) {
root = list_entry(list.next, struct btrfs_root, root_list);
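The join_transaction() rewrite above is an instance of optimistic allocation: take the lock, try to join a running transaction, and if there is none, drop the lock, allocate, retake the lock, and discard the allocation if another thread won the race. A compact pthread sketch of that pattern, with made-up names:

    /* Allocate-then-recheck sketch; txn/running/join are illustrative. */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct txn { int users; } *running;

    static struct txn *join(void)
    {
        pthread_mutex_lock(&trans_lock);
        if (running) {                     /* fast path: hop in */
            running->users++;
            pthread_mutex_unlock(&trans_lock);
            return running;
        }
        pthread_mutex_unlock(&trans_lock);

        struct txn *t = calloc(1, sizeof(*t));  /* allocate unlocked */
        if (!t)
            return NULL;

        pthread_mutex_lock(&trans_lock);
        if (running) {                     /* lost the race: discard ours */
            free(t);
            t = running;
        } else {
            running = t;
        }
        t->users++;
        pthread_mutex_unlock(&trans_lock);
        return t;
    }

This is also why the new code takes two references on a freshly created transaction: one for the joining handle, one that keeps the transaction alive until commit.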
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 804c88639e5d..02564e6230ac 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -28,10 +28,12 @@ struct btrfs_transaction {
* transaction can end
*/
atomic_t num_writers;
+ atomic_t use_count;
unsigned long num_joined;
+
+ spinlock_t commit_lock;
int in_commit;
- atomic_t use_count;
int commit_done;
int blocked;
struct list_head list;
@@ -45,13 +47,14 @@ struct btrfs_transaction {
struct btrfs_trans_handle {
u64 transid;
- u64 block_group;
u64 bytes_reserved;
+ unsigned long use_count;
unsigned long blocks_reserved;
unsigned long blocks_used;
unsigned long delayed_ref_updates;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
+ struct btrfs_block_rsv *orig_rsv;
};
struct btrfs_pending_snapshot {
@@ -66,19 +69,6 @@ struct btrfs_pending_snapshot {
struct list_head list;
};
-static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
- struct inode *inode)
-{
- trans->block_group = BTRFS_I(inode)->block_group;
-}
-
-static inline void btrfs_update_inode_block_group(
- struct btrfs_trans_handle *trans,
- struct inode *inode)
-{
- BTRFS_I(inode)->block_group = trans->block_group;
-}
-
static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
struct inode *inode)
{
@@ -92,12 +82,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items);
-struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
- int num_blocks);
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
- int num_blocks);
-struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
- int num_blocks);
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
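The new use_count and orig_rsv fields in btrfs_trans_handle support re-entrant transaction handles: a nested start on the same task reuses the handle stashed in current->journal_info, and only the outermost end tears it down. A stripped-down sketch of that counting, hypothetical names throughout (__thread is a GCC/Clang extension):

    /* Nested-handle accounting in miniature. */
    #include <stdlib.h>

    struct handle { unsigned long use_count; };

    static __thread struct handle *journal_info; /* like current->journal_info */

    static struct handle *start(void)
    {
        if (journal_info) {               /* nested start: reuse */
            journal_info->use_count++;
            return journal_info;
        }
        struct handle *h = calloc(1, sizeof(*h));
        if (!h)
            return NULL;
        h->use_count = 1;
        journal_info = h;
        return h;
    }

    static void end(struct handle *h)
    {
        if (--h->use_count)               /* inner end: just unwind */
            return;
        journal_info = NULL;              /* outermost end finishes */
        free(h);
    }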
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c48214ef5c09..1efa56e18f9b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
BUG_ON(!new_device);
memcpy(new_device, device, sizeof(*new_device));
new_device->name = kstrdup(device->name, GFP_NOFS);
- BUG_ON(!new_device->name);
+ BUG_ON(device->name && !new_device->name);
new_device->bdev = NULL;
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
transid = btrfs_super_generation(disk_super);
if (disk_super->label[0])
printk(KERN_INFO "device label %s ", disk_super->label);
- else {
- /* FIXME, make a readl uuid parser */
- printk(KERN_INFO "device fsid %llx-%llx ",
- *(unsigned long long *)disk_super->fsid,
- *(unsigned long long *)(disk_super->fsid + 8));
- }
+ else
+ printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
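The volumes.c hunk replaces the hand-rolled fsid printing with the %pU printk extension, which renders 16 raw bytes in the canonical 8-4-4-4-12 UUID form. A userspace equivalent, for illustration only:

    /* What %pU produces, recreated with plain printf. */
    #include <stdio.h>

    static void print_uuid(const unsigned char b[16])
    {
        printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x%02x%02x%02x%02x\n",
               b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
               b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
    }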
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index f3107e4b4d56..5366fe452ab0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_set_trans_block_group(trans, inode);
-
ret = do_setxattr(trans, inode, name, value, size, flags);
if (ret)
goto out;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 33da49dc3cc6..5a3953db8118 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
int err;
struct inode *inode = page->mapping->host;
BUG_ON(!inode);
- igrab(inode);
+ ihold(inode);
err = writepage_nounlock(page, wbc);
unlock_page(page);
iput(inode);
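This and the following ceph hunks convert igrab() calls to ihold() wherever the caller already holds a reference: igrab() may fail (returning NULL for an inode being evicted), while ihold() simply bumps a count the caller is known to own. The distinction in miniature, as a plain C sketch with an explicit refcount:

    /* "grab" must cope with a dying object; "hold" asserts ownership. */
    #include <assert.h>
    #include <stddef.h>

    struct obj { int refs; int dying; };

    static struct obj *grab(struct obj *o)   /* may fail */
    {
        if (o->dying)
            return NULL;
        o->refs++;
        return o;
    }

    static void hold(struct obj *o)          /* caller already holds a ref */
    {
        assert(o->refs > 0);
        o->refs++;
    }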
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 1f72b00447c4..f605753c8fe9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
while (!list_empty(&mdsc->cap_dirty)) {
ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
i_dirty_item);
- inode = igrab(&ci->vfs_inode);
+ inode = &ci->vfs_inode;
+ ihold(inode);
dout("flush_dirty_caps %p\n", inode);
spin_unlock(&mdsc->cap_dirty_lock);
- if (inode) {
- ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
- NULL);
- iput(inode);
- }
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
+ iput(inode);
spin_lock(&mdsc->cap_dirty_lock);
}
spin_unlock(&mdsc->cap_dirty_lock);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 33729e822bb9..ef8f08c343e8 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -308,7 +308,8 @@ more:
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_dentry = dget(filp->f_dentry);
/* hints to request -> mds selection code */
req->r_direct_mode = USE_AUTH_MDS;
@@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
- if (err)
+ if (err) {
d_drop(dentry);
- else if (!req->r_reply_info.head->is_dentry)
- d_instantiate(dentry, igrab(old_dentry->d_inode));
+ } else if (!req->r_reply_info.head->is_dentry) {
+ ihold(old_dentry->d_inode);
+ d_instantiate(dentry, old_dentry->d_inode);
+ }
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index a610d3d67488..f67b687550de 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
err = ceph_mdsc_do_request(mdsc, NULL, req);
inode = req->r_target_inode;
if (inode)
- igrab(inode);
+ ihold(inode);
ceph_mdsc_put_request(req);
if (!inode)
return ERR_PTR(-ESTALE);
@@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
err = ceph_mdsc_do_request(mdsc, NULL, req);
inode = req->r_target_inode;
if (inode)
- igrab(inode);
+ ihold(inode);
ceph_mdsc_put_request(req);
if (!inode)
return ERR_PTR(err ? err : -ESTALE);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 203252d88d9f..9542f07d0b93 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file)
err = PTR_ERR(req);
goto out;
}
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_num_caps = 1;
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
if (!err)
@@ -282,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file)
static int striped_read(struct inode *inode,
u64 off, u64 len,
struct page **pages, int num_pages,
- int *checkeof, bool align_to_pages,
+ int *checkeof, bool o_direct,
unsigned long buf_align)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -307,7 +308,7 @@ static int striped_read(struct inode *inode,
io_align = off & ~PAGE_MASK;
more:
- if (align_to_pages)
+ if (o_direct)
page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
else
page_align = pos & ~PAGE_MASK;
@@ -317,10 +318,10 @@ more:
ci->i_truncate_seq,
ci->i_truncate_size,
page_pos, pages_left, page_align);
- hit_stripe = this_len < left;
- was_short = ret >= 0 && ret < this_len;
if (ret == -ENOENT)
ret = 0;
+ hit_stripe = this_len < left;
+ was_short = ret >= 0 && ret < this_len;
dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
@@ -345,20 +346,22 @@ more:
}
if (was_short) {
- /* was original extent fully inside i_size? */
- if (pos + left <= inode->i_size) {
- dout("zero tail\n");
- ceph_zero_page_vector_range(page_off + read, len - read,
+ /* did we bounce off eof? */
+ if (pos + left > inode->i_size)
+ *checkeof = 1;
+
+ /* zero trailing bytes (inside i_size) */
+ if (left > 0 && pos < inode->i_size) {
+ if (pos + left > inode->i_size)
+ left = inode->i_size - pos;
+
+ dout("zero tail %d\n", left);
+ ceph_zero_page_vector_range(page_off + read, left,
pages);
- read = len;
- goto out;
+ read += left;
}
-
- /* check i_size */
- *checkeof = 1;
}
-out:
if (ret >= 0)
ret = read;
dout("striped_read returns %d\n", ret);
@@ -658,7 +661,7 @@ out:
/* hit EOF or hole? */
if (statret == 0 && *ppos < inode->i_size) {
- dout("aio_read sync_read hit hole, reading more\n");
+ dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
read += ret;
base += ret;
len -= ret;
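The striped_read() change reworks the short-read path: instead of zeroing the whole remainder only when the extent sits fully inside i_size, it clamps the tail to i_size, zeroes just that much, and flags the EOF recheck whenever the request extends past it. The clamping arithmetic, as a tiny standalone sketch with invented names:

    /* Tail-zeroing arithmetic from the hunk above, in isolation. */
    #include <stdio.h>

    static long zero_tail(long pos, long left, long i_size)
    {
        if (left > 0 && pos < i_size) {
            if (pos + left > i_size)
                left = i_size - pos;   /* clamp to i_size */
            return left;               /* bytes to zero (counted as read) */
        }
        return 0;
    }

    int main(void)
    {
        /* 100-byte file; short read leaves 30 bytes starting at offset 90 */
        printf("zero %ld bytes\n", zero_tail(90, 30, 100)); /* -> 10 */
        return 0;
    }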
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 70b6a4839c38..d8858e96ab18 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
goto done;
}
req->r_dentry = dn; /* may have spliced */
- igrab(in);
+ ihold(in);
} else if (ceph_ino(in) == vino.ino &&
ceph_snap(in) == vino.snap) {
- igrab(in);
+ ihold(in);
} else {
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
dn, in, ceph_ino(in), ceph_snap(in),
@@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
goto done;
}
req->r_dentry = dn; /* may have spliced */
- igrab(in);
+ ihold(in);
rinfo->head->is_dentry = 1; /* fool notrace handlers */
}
@@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode)
if (queue_work(ceph_inode_to_client(inode)->wb_wq,
&ceph_inode(inode)->i_wb_work)) {
dout("ceph_queue_writeback %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_writeback %p failed\n", inode);
}
@@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode)
if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
&ceph_inode(inode)->i_pg_inv_work)) {
dout("ceph_queue_invalidate %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_invalidate %p failed\n", inode);
}
@@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
&ci->i_vmtruncate_work)) {
dout("ceph_queue_vmtruncate %p\n", inode);
- igrab(inode);
+ ihold(inode);
} else {
dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
inode, ci->i_truncate_pending);
@@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
__mark_inode_dirty(inode, inode_dirty_flags);
if (mask) {
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = release;
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
@@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask)
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_num_caps = 1;
req->r_args.getattr.mask = cpu_to_le32(mask);
err = ceph_mdsc_do_request(mdsc, NULL, req);
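
The igrab() to ihold() conversions in this file (and the matching ones in ioctl.c, snap.c and xattr.c below) rely on one contract: igrab() can fail and return NULL once an inode is being evicted, whereas ihold() may only be used by a caller that already holds a reference, and then cannot fail. A toy userspace model of that contract; the struct and both helpers are simplified stand-ins, not the kernel's:

#include <assert.h>
#include <stdio.h>

struct inode { int i_count; int i_freeing; };

static struct inode *igrab(struct inode *inode)
{
    if (inode->i_freeing)
        return NULL;                 /* caller must cope with failure */
    inode->i_count++;
    return inode;
}

static void ihold(struct inode *inode)
{
    assert(inode->i_count >= 1);     /* caller already owns a reference */
    inode->i_count++;
}

int main(void)
{
    struct inode ino = { .i_count = 1, .i_freeing = 0 };

    ihold(&ino);                     /* safe: the lookup reference is held */
    ino.i_freeing = 1;
    assert(igrab(&ino) == NULL);     /* igrab refuses a dying inode */
    printf("i_count=%d\n", ino.i_count); /* 2 */
    return 0;
}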
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8888c9ba68db..ef0b5f48e13a 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
req->r_args.setlayout.layout.fl_stripe_unit =
@@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_args.setlayout.layout.fl_stripe_unit =
cpu_to_le32(l.stripe_unit);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 476b329867d4..80576d05d687 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
/* mds requires start and length rather than start and end */
if (LLONG_MAX == fl->fl_end)
@@ -32,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
length = fl->fl_end - fl->fl_start + 1;
dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type`: %d", (int)lock_type,
+ "length: %llu, wait: %d, type: %d", (int)lock_type,
(int)operation, (u64)fl->fl_pid, fl->fl_start,
length, wait, fl->fl_type);
-
req->r_args.filelock_change.rule = lock_type;
req->r_args.filelock_change.type = cmd;
req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
@@ -70,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
}
ceph_mdsc_put_request(req);
dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
- "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type,
+ "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
(int)operation, (u64)fl->fl_pid, fl->fl_start,
length, wait, fl->fl_type, err);
return err;
@@ -109,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
dout("mds locked, locking locally");
err = posix_lock_file(file, fl, NULL);
if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
- /* undo! This should only happen if the kernel detects
- * local deadlock. */
+ /* undo! This should only happen if
+ * the kernel detects local
+ * deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
CEPH_LOCK_UNLOCK, 0, fl);
- dout("got %d on posix_lock_file, undid lock", err);
+ dout("got %d on posix_lock_file, undid lock",
+ err);
}
}
- } else {
- dout("mds returned error code %d", err);
+ } else if (err == -ERESTARTSYS) {
+ dout("undoing lock\n");
+ ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+ CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
@@ -155,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
file, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on flock_lock_file_wait, undid lock", err);
}
- } else {
- dout("mds error code %d", err);
+ } else if (err == -ERESTARTSYS) {
+ dout("undoing lock\n");
+ ceph_lock_message(CEPH_LOCK_FLOCK,
+ CEPH_MDS_OP_SETFILELOCK,
+ file, CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
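
The new -ERESTARTSYS branches above handle a wait interrupted by a signal: the MDS may still have granted the lock even though the local call returned early, so a compensating unlock is sent before bailing out. A compact sketch with hypothetical remote_lock()/remote_unlock() stand-ins for ceph_lock_message() (ERESTARTSYS is kernel-internal, so it is defined by hand here):

#include <stdio.h>

#define ERESTARTSYS 512              /* kernel-internal, not in <errno.h> */

static int remote_lock(void)
{
    return -ERESTARTSYS;             /* wait for the MDS was interrupted */
}

static void remote_unlock(void)
{
    puts("undoing lock");
}

static int lock_demo(void)
{
    int err = remote_lock();

    if (err == -ERESTARTSYS)
        remote_unlock();             /* the MDS may have granted it anyway */
    return err;
}

int main(void)
{
    printf("err=%d\n", lock_demo());
    return 0;
}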
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 24067d68a554..54b14de2e729 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
ci = list_first_entry(&mdsc->snap_flush_list,
struct ceph_inode_info, i_snap_flush_item);
inode = &ci->vfs_inode;
- igrab(inode);
+ ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
spin_lock(&inode->i_lock);
__ceph_flush_snaps(ci, &session, 0);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index f2b628696180..f42d730f1b66 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
err = PTR_ERR(req);
goto out;
}
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req->r_num_caps = 1;
req->r_args.setxattr.flags = cpu_to_le32(flags);
@@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
- req->r_inode = igrab(inode);
+ req->r_inode = inode;
+ ihold(inode);
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
req->r_num_caps = 1;
req->r_path2 = kstrdup(name, GFP_NOFS);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 1cd4c3a1862d..53ed1ad2c112 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -7,6 +7,7 @@ config CIFS
select CRYPTO_MD5
select CRYPTO_HMAC
select CRYPTO_ARC4
+ select CRYPTO_ECB
select CRYPTO_DES
help
This is the client VFS module for the Common Internet File System
@@ -148,7 +149,7 @@ config CIFS_FSCACHE
config CIFS_ACL
bool "Provide CIFS ACL support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && CIFS_XATTR
+ depends on EXPERIMENTAL && CIFS_XATTR && KEYS
help
Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
is handed over to the application/caller.
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index dfbd9f1f373d..5a0ee7f2af06 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -184,7 +184,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
if (cifs_pdu == NULL || server == NULL)
return -EINVAL;
- if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
+ if (!server->session_estab)
return 0;
if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6d88b82537c3..bb659eb73810 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -784,7 +784,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
char *value, *data, *end;
- char *mountdata_copy, *options;
+ char *mountdata_copy = NULL, *options;
unsigned int temp_len, i, j;
char separator[2];
short int override_uid = -1;
@@ -1391,7 +1391,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
} else if (strnicmp(data, "fsc", 3) == 0) {
#ifndef CONFIG_CIFS_FSCACHE
- cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE"
+ cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
"kernel config option set");
goto cifs_parse_mount_err;
#endif
@@ -1976,7 +1976,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
- "ntlmv2 in kernel release 2.6.41");
+ "ntlmv2 in kernel release 3.1");
}
ses->overrideSecFlg = volume_info->secFlg;
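
Initialising mountdata_copy to NULL above is the standard goto-cleanup idiom: error paths that jump to the cleanup label before the allocation happens still free the pointer, which is only safe because kfree(NULL), like free(NULL), is a defined no-op. A userspace rendering of the pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_options(const char *mountdata)
{
    char *mountdata_copy = NULL;   /* must be NULL before any early goto */
    int rc = -1;

    if (!mountdata)
        goto parse_err;            /* jumps before the allocation */

    mountdata_copy = strdup(mountdata);
    if (!mountdata_copy)
        goto parse_err;

    /* ... tokenise and parse the copy ... */
    rc = 0;

parse_err:
    free(mountdata_copy);          /* free(NULL) is a no-op */
    return rc;
}

int main(void)
{
    printf("%d %d\n", parse_options(NULL), parse_options("user=foo"));
    return 0;
}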
diff --git a/fs/dcookies.c b/fs/dcookies.c
index a21cabdbd87b..dda0dc702d1b 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
/* FIXME: (deleted) ? */
path = d_path(&dcs->path, kbuf, PAGE_SIZE);
+ mutex_unlock(&dcookie_mutex);
+
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_free;
@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
out_free:
kfree(kbuf);
+ return err;
out:
mutex_unlock(&dcookie_mutex);
return err;
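
The dcookies change above narrows the mutex to the lookup and d_path() step and gives the free-and-return path its own exit, so the cleanup label no longer performs (or needs) an unlock. A reduced pthread-based model of the new lock scope; names and error values are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t dcookie_mutex = PTHREAD_MUTEX_INITIALIZER;

static int lookup_demo(char *out, size_t len)
{
    int err;

    pthread_mutex_lock(&dcookie_mutex);
    /* the lookup and d_path() equivalent happen under the lock */
    const char *path = "/demo/path";
    pthread_mutex_unlock(&dcookie_mutex);  /* dropped before the copy-out */

    err = (int)strlen(path) + 1;
    if ((size_t)err > len)
        return -36;   /* -ENAMETOOLONG; no second unlock on this path */
    memcpy(out, path, (size_t)err);
    return err;
}

int main(void)
{
    char buf[32];

    printf("ret=%d path=%s\n", lookup_demo(buf, sizeof(buf)), buf);
    return 0;
}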
diff --git a/fs/exec.c b/fs/exec.c
index ea5f748906a8..97e0d52d72fd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
+ set_fs(USER_DS);
current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
flush_thread();
current->personality &= ~bprm->per_clear;
@@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
if (retval)
return retval;
- /* kernel module loader fixup */
- /* so we don't try to load run modprobe in kernel space. */
- set_fs(USER_DS);
-
retval = audit_bprm(bprm);
if (retval)
return retval;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 7257752b6d5d..7018e1d8902d 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
if (attr & ATTR_SYS)
inode->i_flags |= S_IMMUTABLE;
else
- inode->i_flags &= S_IMMUTABLE;
+ inode->i_flags &= ~S_IMMUTABLE;
}
fat_save_attrs(inode, attr);
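
The one-character fat fix above is the classic clear-a-flag bug: flags &= FLAG keeps only that bit and wipes every other one, while flags &= ~FLAG clears just the intended bit. A standalone illustration; the flag values below are made up for the demo and differ from the kernel's S_* constants:

#include <stdio.h>

#define S_IMMUTABLE_DEMO 0x0010u
#define S_NOATIME_DEMO   0x0002u

int main(void)
{
    unsigned int flags = S_IMMUTABLE_DEMO | S_NOATIME_DEMO;

    unsigned int wrong = flags & S_IMMUTABLE_DEMO;  /* drops NOATIME too */
    unsigned int right = flags & ~S_IMMUTABLE_DEMO; /* clears only IMMUTABLE */

    printf("wrong=0x%04x right=0x%04x\n", wrong, right);
    return 0;
}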
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cc6ec4b2f0ff..38f84cd48b67 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_flags & MS_MANDLOCK)
goto err;
+ sb->s_flags &= ~MS_NOSEC;
+
if (!parse_fuse_opt((char *) data, &d, is_bdev))
goto err;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2792a790e50b..1c1336e7b3b2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
drop_ref = 1;
}
spin_lock(&gl->gl_spin);
- if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
unsigned long holdtime, now = jiffies;
+
holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
if (time_before(now, holdtime))
delay = holdtime - now;
- set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
+
+ if (!delay) {
+ clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+ set_bit(GLF_DEMOTE, &gl->gl_flags);
+ }
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
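
The glock rework above stops using test_and_clear_bit(), which dropped GLF_PENDING_DEMOTE first and then had to re-set it whenever the demote was delayed; the new code clears the pending bit only at the moment it is promoted to a real demote. A toy model, assuming (as in the kernel, which holds gl_spin here) that plain flag operations are race-free:

#include <stdio.h>

#define GLF_PENDING_DEMOTE 0x1u
#define GLF_DEMOTE         0x2u

static void maybe_demote(unsigned int *flags, unsigned long delay)
{
    if (*flags & GLF_PENDING_DEMOTE) {      /* test_bit: nothing cleared */
        if (!delay) {
            *flags &= ~GLF_PENDING_DEMOTE;  /* promote pending -> demote */
            *flags |= GLF_DEMOTE;
        }
        /* delay != 0: the pending bit survives untouched */
    }
}

int main(void)
{
    unsigned int flags = GLF_PENDING_DEMOTE;

    maybe_demote(&flags, 100);  /* still inside the minimum hold time */
    printf("delayed: flags=0x%x\n", flags);  /* 0x1: still pending */
    maybe_demote(&flags, 0);    /* hold time expired */
    printf("expired: flags=0x%x\n", flags);  /* 0x2: demote set */
    return 0;
}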
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 278e3fb40b71..583636f745e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb)
bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
log);
if (IS_ERR(bdev)) {
- rc = -PTR_ERR(bdev);
+ rc = PTR_ERR(bdev);
goto free;
}
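
The jfs fix above removes a double negation: PTR_ERR() already extracts the negative errno that ERR_PTR() packed into the pointer, so negating it produced a positive value that callers testing rc < 0 would miss. The ERR_PTR/PTR_ERR/IS_ERR trio reimplemented in userspace to show the effect:

#include <stdio.h>

#define ENOMEM 12

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-4095;
}

int main(void)
{
    void *bdev = ERR_PTR(-ENOMEM);

    if (IS_ERR(bdev)) {
        long rc = PTR_ERR(bdev);            /* already -12 */
        printf("rc=%ld, buggy -PTR_ERR()=%ld\n", rc, -PTR_ERR(bdev));
    }
    return 0;
}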
diff --git a/fs/namei.c b/fs/namei.c
index e2e4e8d032ee..9802345df5e7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2624,6 +2624,10 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
+ if (!dentry->d_inode) {
+ error = -ENOENT;
+ goto exit3;
+ }
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit3;
@@ -2709,11 +2713,10 @@ static long do_unlinkat(int dfd, const char __user *pathname)
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
- if (nd.last.name[nd.last.len])
- goto slashes;
inode = dentry->d_inode;
- if (inode)
- ihold(inode);
+ if (nd.last.name[nd.last.len] || !inode)
+ goto slashes;
+ ihold(inode);
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7eafe468a29c..b2e3ff347620 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree,
path[level].bp_bh = NULL;
}
+static void nilfs_btree_nop(struct nilfs_bmap *btree,
+ struct nilfs_btree_path *path,
+ int level, __u64 *keyp, __u64 *ptrp)
+{
+}
static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
@@ -1356,20 +1361,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
struct buffer_head *bh;
struct nilfs_btree_node *node, *parent, *sib;
__u64 sibptr;
- int pindex, level, ncmin, ncmax, ncblk, ret;
+ int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
ret = 0;
stats->bs_nblocks = 0;
ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
ncblk = nilfs_btree_nchildren_per_block(btree);
- for (level = NILFS_BTREE_LEVEL_NODE_MIN;
+ for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
level < nilfs_btree_height(btree) - 1;
level++) {
node = nilfs_btree_get_nonroot_node(path, level);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(node, path[level].bp_index,
- ncblk);
+ nilfs_btree_node_get_ptr(node, dindex, ncblk);
ret = nilfs_bmap_prepare_end_ptr(btree,
&path[level].bp_oldreq, dat);
if (ret < 0)
@@ -1383,6 +1387,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
pindex = path[level + 1].bp_index;
+ dindex = pindex;
if (pindex > 0) {
/* left sibling */
@@ -1421,6 +1426,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_concat_right;
stats->bs_nblocks++;
+ /*
+ * When merging right sibling node
+ * into the current node, pointer to
+ * the right sibling node must be
+ * terminated instead. The adjustment
+ * below is required for that.
+ */
+ dindex = pindex + 1;
/* continue; */
}
} else {
@@ -1431,29 +1444,31 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
NILFS_BTREE_ROOT_NCHILDREN_MAX) {
path[level].bp_op = nilfs_btree_shrink;
stats->bs_nblocks += 2;
+ level++;
+ path[level].bp_op = nilfs_btree_nop;
+ goto shrink_root_child;
} else {
path[level].bp_op = nilfs_btree_do_delete;
stats->bs_nblocks++;
+ goto out;
}
-
- goto out;
-
}
}
+ /* child of the root node is deleted */
+ path[level].bp_op = nilfs_btree_do_delete;
+ stats->bs_nblocks++;
+
+shrink_root_child:
node = nilfs_btree_get_root(btree);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(node, path[level].bp_index,
+ nilfs_btree_node_get_ptr(node, dindex,
NILFS_BTREE_ROOT_NCHILDREN_MAX);
ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
if (ret < 0)
goto err_out_child_node;
- /* child of the root node is deleted */
- path[level].bp_op = nilfs_btree_do_delete;
- stats->bs_nblocks++;
-
/* success */
out:
*levelp = level;
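
The dindex bookkeeping above decides which parent slot must be terminated: deleting with a left merge (or no merge) ends the current child's slot, pindex, but merging the right sibling into the current node must end the right sibling's slot, pindex + 1. A heavily reduced array model of that choice; real btree nodes, keys and the DAT handling are omitted:

#include <stdio.h>

/* terminate one child slot in a parent node and pack the rest */
static void end_parent_slot(int *slots, int n, int dindex)
{
    for (int i = dindex; i < n - 1; i++)
        slots[i] = slots[i + 1];
    slots[n - 1] = 0;
}

int main(void)
{
    int parent[4] = { 10, 20, 30, 0 };  /* pointers to child nodes */
    int pindex = 1;                     /* current child is "20" */

    /* concat-right: the children of "30" are merged into "20", so the
     * slot that must disappear is the right sibling's, not ours */
    int dindex = pindex + 1;

    end_parent_slot(parent, 4, dindex);
    printf("%d %d %d %d\n", parent[0], parent[1], parent[2], parent[3]);
    /* 10 20 0 0: "20" survives, holding the merged children */
    return 0;
}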
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 141646e88fb5..bb24ab6c282f 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
if (nilfs->ns_interval)
- sci->sc_interval = nilfs->ns_interval;
+ sci->sc_interval = HZ * nilfs->ns_interval;
if (nilfs->ns_watermark)
sci->sc_watermark = nilfs->ns_watermark;
return sci;
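
The segment-constructor fix above converts the user-visible interval, given in seconds, into jiffies before use; without the multiply a 5-second setting expired after only 5 timer ticks. Illustration with an assumed HZ (kernels are commonly built with 100, 250 or 1000):

#include <stdio.h>

#define HZ 100  /* assumed build-time tick rate, for the demo only */

int main(void)
{
    unsigned long ns_interval = 5;          /* seconds, user supplied */

    unsigned long wrong = ns_interval;      /* 5 jiffies = 50 ms at HZ=100 */
    unsigned long right = HZ * ns_interval; /* 500 jiffies = 5 s */

    printf("wrong=%lu jiffies, right=%lu jiffies\n", wrong, right);
    return 0;
}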
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cdbaf5e97308..56f61027236b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = OCFS2_SUPER_MAGIC;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
/* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f82e762eeca2..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
- struct gendisk *disk = dev_to_disk(dev);
- unsigned int alignment = 0;
-
- if (disk->queue)
- alignment = queue_limit_discard_alignment(&disk->queue->limits,
- p->start_sect);
- return sprintf(buf, "%u\n", alignment);
+ return sprintf(buf, "%u\n", p->discard_alignment);
}
ssize_t part_stat_show(struct device *dev,
@@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
p->start_sect = start;
p->alignment_offset =
queue_limit_alignment_offset(&disk->queue->limits, start);
+ p->discard_alignment =
+ queue_limit_discard_alignment(&disk->queue->limits, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);
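
The partition change above computes the discard alignment once, in add_partition(), and stores it in the hd_struct, so the sysfs show routine prints a cached field instead of reaching back into the request queue on every read. A reduced sketch of that create-time caching; the types and the alignment formula are simplified stand-ins, not the block layer's:

#include <stdio.h>

struct queue_limits { unsigned int discard_granularity; };

struct hd_struct {
    unsigned long long start_sect;
    unsigned int discard_alignment;   /* cached at creation time */
};

static unsigned int queue_limit_discard_alignment(struct queue_limits *l,
                                                  unsigned long long start)
{
    /* stand-in calculation, not the real block-layer formula */
    return l->discard_granularity ?
           (unsigned int)(start % l->discard_granularity) : 0;
}

static void add_partition_demo(struct hd_struct *p, struct queue_limits *l,
                               unsigned long long start)
{
    p->start_sect = start;
    p->discard_alignment = queue_limit_discard_alignment(l, start);
}

int main(void)
{
    struct queue_limits lim = { .discard_granularity = 8 };
    struct hd_struct p;

    add_partition_demo(&p, &lim, 2051);
    printf("discard_alignment=%u\n", p.discard_alignment); /* cached: 3 */
    return 0;
}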
diff --git a/fs/super.c b/fs/super.c
index c75593953c52..ab3d672db0de 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags;
+ s->s_flags = flags | MS_NOSEC;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
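
The MS_NOSEC edits in this merge (here, plus the fuse and ocfs2 hunks above) follow one pattern: mount_bdev() now enables the flag by default, and filesystems whose file modes can change behind the kernel's back opt back out. A compact illustration of the opt-out, with flag values chosen for the demo:

#include <stdio.h>

#define MS_POSIXACL 0x00010000u  /* demo values */
#define MS_NOSEC    0x10000000u

int main(void)
{
    unsigned int s_flags = MS_NOSEC | MS_POSIXACL; /* as mount_bdev leaves it */

    s_flags &= ~MS_NOSEC;   /* fuse/ocfs2: modes may change server-side */
    printf("s_flags=0x%08x\n", s_flags);           /* 0x00010000 */
    return 0;
}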
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 166951e0dcd3..3be645e012c9 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
ubifs_assert(wbuf->size % c->min_io_size == 0);
ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(!c->space_fixup);
if (c->leb_size - wbuf->offs >= c->max_write_size)
ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(!c->space_fixup);
if (c->ro_error)
return -EROFS;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 34b1679e6e3a..cef0460f4c54 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -669,6 +669,7 @@ out_free:
out_release:
release_head(c, BASEHD);
+ kfree(dent);
out_ro:
ubifs_ro_mode(c, err);
if (last_reference)
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index bd644bf587a8..a5422fffbd69 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
if (IS_ERR(sleb)) {
if (PTR_ERR(sleb) == -EUCLEAN)
sleb = ubifs_recover_leb(c, lnum, 0,
- c->sbuf, 0);
+ c->sbuf, -1);
if (IS_ERR(sleb)) {
err = PTR_ERR(sleb);
break;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 731d9e2e7b50..783d8e0beb76 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
}
/**
- * drop_last_node - drop the last node or group of nodes.
+ * drop_last_group - drop the last group of nodes.
* @sleb: scanned LEB information
* @offs: offset of dropped nodes is returned here
- * @grouped: non-zero if whole group of nodes have to be dropped
*
* This is a helper function for 'ubifs_recover_leb()' which drops the last
- * node of the scanned LEB or the last group of nodes if @grouped is not zero.
- * This function returns %1 if a node was dropped and %0 otherwise.
+ * group of nodes of the scanned LEB.
*/
-static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
+static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
- int dropped = 0;
-
while (!list_empty(&sleb->nodes)) {
struct ubifs_scan_node *snod;
struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
list);
ch = snod->node;
if (ch->group_type != UBIFS_IN_NODE_GROUP)
- return dropped;
- dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+ break;
+
+ dbg_rcvry("dropping grouped node at %d:%d",
+ sleb->lnum, snod->offs);
+ *offs = snod->offs;
+ list_del(&snod->list);
+ kfree(snod);
+ sleb->nodes_cnt -= 1;
+ }
+}
+
+/**
+ * drop_last_node - drop the last node.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ *
+ * This is a helper function for 'ubifs_recover_leb()' which drops the last
+ * node of the scanned LEB.
+ */
+static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
+{
+ struct ubifs_scan_node *snod;
+
+ if (!list_empty(&sleb->nodes)) {
+ snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+ list);
+
+ dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
*offs = snod->offs;
list_del(&snod->list);
kfree(snod);
sleb->nodes_cnt -= 1;
- dropped = 1;
- if (!grouped)
- break;
}
- return dropped;
}
/**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
* @lnum: LEB number
* @offs: offset
* @sbuf: LEB-sized buffer to use
- * @grouped: nodes may be grouped for recovery
+ * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
+ * belong to any journal head)
*
* This function does a scan of a LEB, but caters for errors that might have
* been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
* found, and a negative error code in case of failure.
*/
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
- int offs, void *sbuf, int grouped)
+ int offs, void *sbuf, int jhead)
{
int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
+ int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
struct ubifs_scan_leb *sleb;
void *buf = sbuf + offs;
- dbg_rcvry("%d:%d", lnum, offs);
+ dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
sleb = ubifs_start_scan(c, lnum, offs, sbuf);
if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* Scan quietly until there is an error from which we cannot
* recover
*/
- ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
if (ret == SCANNED_A_NODE) {
/* A valid node, and not a padding node */
struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* If nodes are grouped, always drop the incomplete group at
* the end.
*/
- drop_last_node(sleb, &offs, 1);
+ drop_last_group(sleb, &offs);
- /*
- * While we are in the middle of the same min. I/O unit keep dropping
- * nodes. So basically, what we want is to make sure that the last min.
- * I/O unit where we saw the corruption is dropped completely with all
- * the uncorrupted node which may possibly sit there.
- *
- * In other words, let's name the min. I/O unit where the corruption
- * starts B, and the previous min. I/O unit A. The below code tries to
- * deal with a situation when half of B contains valid nodes or the end
- * of a valid node, and the second half of B contains corrupted data or
- * garbage. This means that UBIFS had been writing to B just before the
- * power cut happened. I do not know how realistic is this scenario
- * that half of the min. I/O unit had been written successfully and the
- * other half not, but this is possible in our 'failure mode emulation'
- * infrastructure at least.
- *
- * So what is the problem, why we need to drop those nodes? Whey can't
- * we just clean-up the second half of B by putting a padding node
- * there? We can, and this works fine with one exception which was
- * reproduced with power cut emulation testing and happens extremely
- * rarely. The description follows, but it is worth noting that that is
- * only about the GC head, so we could do this trick only if the bud
- * belongs to the GC head, but it does not seem to be worth an
- * additional "if" statement.
- *
- * So, imagine the file-system is full, we run GC which is moving valid
- * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
- * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
- * and will try to continue. Imagine that LEB X is currently the
- * dirtiest LEB, and the amount of used space in LEB Y is exactly the
- * same as amount of free space in LEB X.
- *
- * And a power cut happens when nodes are moved from LEB X to LEB Y. We
- * are here trying to recover LEB Y which is the GC head LEB. We find
- * the min. I/O unit B as described above. Then we clean-up LEB Y by
- * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
- * fails, because it cannot find a dirty LEB which could be GC'd into
- * LEB Y! Even LEB X does not match because the amount of valid nodes
- * there does not fit the free space in LEB Y any more! And this is
- * because of the padding node which we added to LEB Y. The
- * user-visible effect of this which I once observed and analysed is
- * that we cannot mount the file-system with -ENOSPC error.
- *
- * So obviously, to make sure that situation does not happen we should
- * free min. I/O unit B in LEB Y completely and the last used min. I/O
- * unit in LEB Y should be A. This is basically what the below code
- * tries to do.
- */
- while (min_io_unit == round_down(offs, c->min_io_size) &&
- min_io_unit != offs &&
- drop_last_node(sleb, &offs, grouped));
+ if (jhead == GCHD) {
+ /*
+ * If this LEB belongs to the GC head then while we are in the
+ * middle of the same min. I/O unit keep dropping nodes. So
+ * basically, what we want is to make sure that the last min.
+ * I/O unit where we saw the corruption is dropped completely
+ * with all the uncorrupted nodes which may possibly sit there.
+ *
+ * In other words, let's name the min. I/O unit where the
+ * corruption starts B, and the previous min. I/O unit A. The
+ * below code tries to deal with a situation when half of B
+ * contains valid nodes or the end of a valid node, and the
+ * second half of B contains corrupted data or garbage. This
+ * means that UBIFS had been writing to B just before the power
+ * cut happened. I do not know how realistic is this scenario
+ * that half of the min. I/O unit had been written successfully
+ * and the other half not, but this is possible in our 'failure
+ * mode emulation' infrastructure at least.
+ *
+ * So what is the problem, why we need to drop those nodes? Why
+ * can't we just clean-up the second half of B by putting a
+ * padding node there? We can, and this works fine with one
+ * exception which was reproduced with power cut emulation
+ * testing and happens extremely rarely.
+ *
+ * Imagine the file-system is full, we run GC which starts
+ * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
+ * the current GC head LEB). The @c->gc_lnum is -1, which means
+ * that GC will retain LEB X and will try to continue. Imagine
+ * that LEB X is currently the dirtiest LEB, and the amount of
+ * used space in LEB Y is exactly the same as amount of free
+ * space in LEB X.
+ *
+ * And a power cut happens when nodes are moved from LEB X to
+ * LEB Y. We are here trying to recover LEB Y which is the GC
+ * head LEB. We find the min. I/O unit B as described above.
+ * Then we clean-up LEB Y by padding min. I/O unit. And later
+ * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
+ * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
+ * does not match because the amount of valid nodes there does
+ * not fit the free space in LEB Y any more! And this is
+ * because of the padding node which we added to LEB Y. The
+ * user-visible effect of this which I once observed and
+ * analysed is that we cannot mount the file-system with
+ * -ENOSPC error.
+ *
+ * So obviously, to make sure that situation does not happen we
+ * should free min. I/O unit B in LEB Y completely and the last
+ * used min. I/O unit in LEB Y should be A. This is basically
+ * what the below code tries to do.
+ */
+ while (offs > min_io_unit)
+ drop_last_node(sleb, &offs);
+ }
buf = sbuf + offs;
len = c->leb_size - offs;
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
}
ubifs_scan_destroy(sleb);
}
- return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+ return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
}
/**
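
The GC-head loop introduced above keeps calling drop_last_node() until the recovered LEB's write offset falls back to the boundary of the min. I/O unit that contained the corruption, leaving that unit completely unused. A standalone model with a plain offset array standing in for the scanned node list:

#include <stdio.h>

int main(void)
{
    unsigned int node_offs[] = { 0, 512, 1024, 1536, 2048, 2560 };
    int n = 6;
    unsigned int min_io_size = 2048;
    unsigned int offs = 3000;  /* end of the last good node */
    unsigned int min_io_unit = offs - (offs % min_io_size); /* 2048 */

    /* GC-head case: pop trailing nodes until unit B is fully empty */
    while (n > 0 && offs > min_io_unit)
        offs = node_offs[--n];  /* drop_last_node() stand-in */

    printf("kept %d nodes, next offs %u\n", n, offs); /* kept 4, 2048 */
    return 0;
}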
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 6617280d1679..5e97161ce4d3 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
* these LEBs could possibly be written to at the power cut
* time.
*/
- sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
- b->bud->jhead != GCHD);
+ sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
else
sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
if (IS_ERR(sleb))
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index ca953a945029..9e1d05666fed 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
if (nr == 0)
- return clean_zn_cnt;
+ /*
+ * Due to the way UBIFS updates the clean znode counter it may
+ * temporarily be negative.
+ */
+ return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
if (!clean_zn_cnt) {
/*
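
The shrinker fix above copes with a clean-znode counter that can dip below zero between paired updates: when the VM only asks for the object count (nr == 0), a non-negative number must be reported. Minimal illustration:

#include <stdio.h>

static long shrink_count(long clean_zn_cnt)
{
    /* the counter may be transiently negative; never report that */
    return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
}

int main(void)
{
    printf("%ld %ld\n", shrink_count(42), shrink_count(-3)); /* 42 1 */
    return 0;
}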
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1ab0d22e4c94..b5aeb5a8ebed 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
c->jheads[i].wbuf.jhead = i;
+ c->jheads[i].grouped = 1;
}
c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
/*
* Garbage Collector head likely contains long-term data and
- * does not need to be synchronized by timer.
+ * does not need to be synchronized by timer. Also GC head nodes are
+ * not grouped.
*/
c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
c->jheads[GCHD].wbuf.no_timer = 1;
+ c->jheads[GCHD].grouped = 0;
return 0;
}
@@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
ubifs_msg("recovery needed");
c->need_recovery = 1;
- if (!c->ro_mount) {
- err = ubifs_recover_inl_heads(c, c->sbuf);
- if (err)
- goto out_master;
- }
- } else if (!c->ro_mount) {
+ }
+
+ if (c->need_recovery && !c->ro_mount) {
+ err = ubifs_recover_inl_heads(c, c->sbuf);
+ if (err)
+ goto out_master;
+ }
+
+ err = ubifs_lpt_init(c, 1, !c->ro_mount);
+ if (err)
+ goto out_master;
+
+ if (!c->ro_mount && c->space_fixup) {
+ err = ubifs_fixup_free_space(c);
+ if (err)
+ goto out_master;
+ }
+
+ if (!c->ro_mount) {
/*
* Set the "dirty" flag so that if we reboot uncleanly we
* will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
err = ubifs_write_master(c);
if (err)
- goto out_master;
+ goto out_lpt;
}
- err = ubifs_lpt_init(c, 1, !c->ro_mount);
- if (err)
- goto out_lpt;
-
err = dbg_check_idx_size(c, c->bi.old_idx_sz);
if (err)
goto out_lpt;
@@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
} else
ubifs_assert(c->lst.taken_empty_lebs > 0);
- if (!c->ro_mount && c->space_fixup) {
- err = ubifs_fixup_free_space(c);
- if (err)
- goto out_infos;
- }
-
err = dbg_check_filesystem(c);
if (err)
goto out_infos;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 8119b1fd8d94..91b4213dde84 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
*/
void ubifs_tnc_close(struct ubifs_info *c)
{
- long clean_freed;
-
tnc_destroy_cnext(c);
if (c->zroot.znode) {
- clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
- atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
+ long n;
+
+ ubifs_destroy_tnc_subtree(c->zroot.znode);
+ n = atomic_long_read(&c->clean_zn_cnt);
+ atomic_long_sub(n, &ubifs_clean_zn_cnt);
}
kfree(c->gap_lebs);
kfree(c->ilebs);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a70d7b4ffb25..f79983d6f860 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -722,12 +722,14 @@ struct ubifs_bud {
* struct ubifs_jhead - journal head.
* @wbuf: head's write-buffer
* @buds_list: list of bud LEBs belonging to this journal head
+ * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
*
* Note, the @buds list is protected by the @c->buds_lock.
*/
struct ubifs_jhead {
struct ubifs_wbuf wbuf;
struct list_head buds_list;
+ unsigned int grouped:1;
};
/**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
int ubifs_recover_master_node(struct ubifs_info *c);
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
- int offs, void *sbuf, int grouped);
+ int offs, void *sbuf, int jhead);
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf);
int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);