Diffstat (limited to 'fs/btrfs/locking.h')
 fs/btrfs/locking.h | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 29135def468e..0453a4797693 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -17,7 +17,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
@@ -37,6 +38,18 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
		BUG();
}

+/*
+ * If we currently have a spinning reader or writer lock (indicated by the rw
+ * flag) this will bump the count of blocking holders and drop the spinlock.
+ */
+static inline void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+{
+	if (rw == BTRFS_WRITE_LOCK)
+		btrfs_set_lock_blocking_write(eb);
+	else if (rw == BTRFS_READ_LOCK)
+		btrfs_set_lock_blocking_read(eb);
+}
+
static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
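
For illustration only, a standalone userspace sketch of the compatibility-wrapper pattern this hunk introduces: the old rw-flag entry point now simply dispatches to the split read/write helpers. Everything below (struct fields, constant values, printf bodies) is a stand-in invented for the sketch, not kernel code; in the kernel the real helpers declared above are implemented in fs/btrfs/locking.c and manipulate the extent buffer's lock state.

/* Standalone sketch of the rw-flag dispatch pattern; not kernel code. */
#include <stdio.h>

/* Values are placeholders for this sketch. */
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK  2

struct extent_buffer {
	int blocking_writers;	/* stand-in for the real lock bookkeeping */
	int blocking_readers;
};

/* Stand-ins for the new split helpers added by the patch. */
static void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	eb->blocking_writers++;
	printf("write lock marked blocking\n");
}

static void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	eb->blocking_readers++;
	printf("read lock marked blocking\n");
}

/* The compatibility wrapper, mirroring the hunk above. */
static void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_set_lock_blocking_write(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_set_lock_blocking_read(eb);
}

int main(void)
{
	struct extent_buffer eb = { 0, 0 };

	btrfs_set_lock_blocking_rw(&eb, BTRFS_WRITE_LOCK);	/* legacy rw-flag call */
	btrfs_set_lock_blocking_read(&eb);			/* new split helper */
	return 0;
}

Keeping btrfs_set_lock_blocking_rw() as a static inline wrapper presumably lets existing callers compile unchanged while new code calls the explicit read/write variants directly.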