summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--fs/btrfs/delayed-inode.c24
-rw-r--r--fs/btrfs/delayed-inode.h15
-rw-r--r--fs/btrfs/disk-io.c8
-rw-r--r--fs/btrfs/fs.h18
4 files changed, 30 insertions, 35 deletions
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 2286bee2c6d3..a752646257df 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -257,7 +257,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
struct list_head *p;
struct btrfs_delayed_node *next = NULL;
- delayed_root = node->root->fs_info->delayed_root;
+ delayed_root = &node->root->fs_info->delayed_root;
spin_lock(&delayed_root->lock);
if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
/* not in the list */
@@ -287,7 +287,7 @@ static void __btrfs_release_delayed_node(
if (!delayed_node)
return;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
mutex_lock(&delayed_node->mutex);
if (delayed_node->count)
@@ -425,7 +425,7 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
delayed_node->index_cnt = ins->index + 1;
delayed_node->count++;
- atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+ atomic_inc(&delayed_node->root->fs_info->delayed_root.items);
return 0;
}
@@ -452,7 +452,7 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
/* If it's in a rbtree, then we need to have delayed node locked. */
lockdep_assert_held(&delayed_node->mutex);
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
@@ -988,7 +988,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count--;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
finish_one_item(delayed_root);
}
}
@@ -1002,7 +1002,7 @@ static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
ASSERT(delayed_node->root);
delayed_node->count--;
- delayed_root = delayed_node->root->fs_info->delayed_root;
+ delayed_root = &delayed_node->root->fs_info->delayed_root;
finish_one_item(delayed_root);
}
}
@@ -1168,7 +1168,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
block_rsv = trans->block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
- delayed_root = fs_info->delayed_root;
+ delayed_root = &fs_info->delayed_root;
curr_node = btrfs_first_delayed_node(delayed_root, &curr_delayed_node_tracker);
while (curr_node && (!count || nr--)) {
@@ -1417,7 +1417,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
struct btrfs_ref_tracker delayed_node_tracker;
struct btrfs_delayed_node *node;
- node = btrfs_first_delayed_node( fs_info->delayed_root, &delayed_node_tracker);
+ node = btrfs_first_delayed_node(&fs_info->delayed_root, &delayed_node_tracker);
if (WARN_ON(node)) {
btrfs_delayed_node_ref_tracker_free(node,
&delayed_node_tracker);
@@ -1440,7 +1440,7 @@ static bool could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
- struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
+ struct btrfs_delayed_root *delayed_root = &fs_info->delayed_root;
if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
btrfs_workqueue_normal_congested(fs_info->delayed_workers))
@@ -1970,7 +1970,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count++;
- atomic_inc(&root->fs_info->delayed_root->items);
+ atomic_inc(&root->fs_info->delayed_root.items);
release_node:
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
@@ -2012,7 +2012,7 @@ int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
mutex_lock(&delayed_node->mutex);
if (!test_and_set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
delayed_node->count++;
- atomic_inc(&fs_info->delayed_root->items);
+ atomic_inc(&fs_info->delayed_root.items);
}
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node, &delayed_node_tracker);
@@ -2118,7 +2118,7 @@ void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_ref_tracker curr_delayed_node_tracker, prev_delayed_node_tracker;
- curr_node = btrfs_first_delayed_node(fs_info->delayed_root,
+ curr_node = btrfs_first_delayed_node(&fs_info->delayed_root,
&curr_delayed_node_tracker);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index b09d4ec8c77d..fc752863f89b 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -30,21 +30,6 @@ enum btrfs_delayed_item_type {
BTRFS_DELAYED_DELETION_ITEM
};
-struct btrfs_delayed_root {
- spinlock_t lock;
- struct list_head node_list;
- /*
- * Used for delayed nodes which is waiting to be dealt with by the
- * worker. If the delayed node is inserted into the work queue, we
- * drop it from this list.
- */
- struct list_head prepare_list;
- atomic_t items; /* for delayed items */
- atomic_t items_seq; /* for delayed items */
- int nodes; /* for delayed nodes */
- wait_queue_head_t wait;
-};
-
struct btrfs_ref_tracker_dir {
#ifdef CONFIG_BTRFS_DEBUG
struct ref_tracker_dir dir;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 32fffb0557e5..665440ecce12 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -22,6 +22,7 @@
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
+#include "delayed-inode.h"
#include "bio.h"
#include "print-tree.h"
#include "locking.h"
@@ -1217,7 +1218,6 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_free_stripe_hash_table(fs_info);
btrfs_free_ref_cache(fs_info);
kfree(fs_info->balance_ctl);
- kfree(fs_info->delayed_root);
free_global_roots(fs_info);
btrfs_put_root(fs_info->tree_root);
btrfs_put_root(fs_info->chunk_root);
@@ -2942,11 +2942,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
- fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
- GFP_KERNEL);
- if (!fs_info->delayed_root)
- return -ENOMEM;
- btrfs_init_delayed_root(fs_info->delayed_root);
+ btrfs_init_delayed_root(&fs_info->delayed_root);
if (sb_rdonly(sb))
set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 5bbc993a66e1..d315530a2928 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -44,7 +44,6 @@ struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
-struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
@@ -464,6 +463,21 @@ struct btrfs_commit_stats {
u64 critical_section_start_time;
};
+struct btrfs_delayed_root {
+ spinlock_t lock;
+ struct list_head node_list;
+ /*
+	 * Used for delayed nodes that are waiting to be dealt with by the
+ * worker. If the delayed node is inserted into the work queue, we
+ * drop it from this list.
+ */
+ struct list_head prepare_list;
+ atomic_t items; /* for delayed items */
+ atomic_t items_seq; /* for delayed items */
+ int nodes; /* for delayed nodes */
+ wait_queue_head_t wait;
+};
+
struct btrfs_fs_info {
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
unsigned long flags;
@@ -817,7 +831,7 @@ struct btrfs_fs_info {
/* Filesystem state */
unsigned long fs_state;
- struct btrfs_delayed_root *delayed_root;
+ struct btrfs_delayed_root delayed_root;
/* Entries are eb->start >> nodesize_bits */
struct xarray buffer_tree;