summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/locking.h
diff options
context:
space:
mode:
authorDavid Sterba <dsterba@suse.com>2018-04-04 01:43:05 +0200
committerDavid Sterba <dsterba@suse.com>2019-02-25 14:13:27 +0100
commitb95be2d9fb2a6120958b777e13d2328f9770bc2d (patch)
tree6be675094acd2c61d2638c07e35b744567e273c6 /fs/btrfs/locking.h
parentbtrfs: qgroup: Cleanup old subtree swap code (diff)
downloadlinux-b95be2d9fb2a6120958b777e13d2328f9770bc2d.tar.xz
linux-b95be2d9fb2a6120958b777e13d2328f9770bc2d.zip
btrfs: split btrfs_set_lock_blocking_rw to read and write helpers
There are many callers that hardcode the desired lock type so we can avoid the switch and call them directly. Split the current function to two but leave a helper that still takes the variable lock type to make current code compile. The call sites will be converted in followup patches. Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/locking.h')
-rw-r--r--fs/btrfs/locking.h15
1 file changed, 14 insertions, 1 deletion
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 29135def468e..0453a4797693 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -17,7 +17,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
@@ -37,6 +38,18 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
BUG();
}
+/*
+ * If we currently have a spinning reader or writer lock (indicated by the rw
+ * flag) this will bump the count of blocking holders and drop the spinlock.
+ */
+static inline void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+{
+ if (rw == BTRFS_WRITE_LOCK)
+ btrfs_set_lock_blocking_write(eb);
+ else if (rw == BTRFS_READ_LOCK)
+ btrfs_set_lock_blocking_read(eb);
+}
+
static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);