author    David Sterba <dsterba@suse.com>  2019-09-24 19:17:17 +0200
committer David Sterba <dsterba@suse.com>  2019-11-18 12:46:49 +0100
commit    ed2b1d36a9d027f9b841be5bfc9d61011462d447 (patch)
tree      3dc4a4bdbd2503e8955d691b7f0d75956ba4d176
parent    btrfs: make btrfs_assert_tree_locked static inline (diff)
btrfs: move btrfs_set_path_blocking to other locking functions
The function belongs to the family of locking functions, so move it
there. The 'noinline' keyword is dropped as it's now an exported
function that does not need it.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
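For context, the moved function converts any spinning tree locks held along a
path into their blocking variants so the holder can safely sleep. A minimal
caller-side sketch; example_walk() and do_step_that_may_sleep() are invented
names for illustration, only btrfs_set_path_blocking() comes from this patch:

	/* Illustrative sketch only; not part of this patch. */
	static int example_walk(struct btrfs_path *path)
	{
		/*
		 * path->nodes[] / path->locks[] may still hold spinning
		 * tree locks here.  Convert them to blocking locks before
		 * anything that can schedule, e.g. an allocation that
		 * may sleep.
		 */
		btrfs_set_path_blocking(path);

		return do_step_that_may_sleep(path); /* hypothetical helper */
	}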
-rw-r--r--  fs/btrfs/ctree.c   | 25
-rw-r--r--  fs/btrfs/locking.c | 26
-rw-r--r--  fs/btrfs/locking.h |  2
3 files changed, 28 insertions(+), 25 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0231141de289..a55d55e5c913 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -56,31 +56,6 @@ struct btrfs_path *btrfs_alloc_path(void)
return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
-/*
- * set all locked nodes in the path to blocking locks. This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
-{
- int i;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- if (!p->nodes[i] || !p->locks[i])
- continue;
- /*
- * If we currently have a spinning reader or writer lock this
- * will bump the count of blocking holders and drop the
- * spinlock.
- */
- if (p->locks[i] == BTRFS_READ_LOCK) {
- btrfs_set_lock_blocking_read(p->nodes[i]);
- p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
- } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
- btrfs_set_lock_blocking_write(p->nodes[i]);
- p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
- }
- }
-}
-
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 028513153ac4..f58606887859 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -316,3 +316,29 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
write_unlock(&eb->lock);
}
}
+
+/*
+ * Set all locked nodes in the path to blocking locks. This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ if (!p->nodes[i] || !p->locks[i])
+ continue;
+ /*
+ * If we currently have a spinning reader or writer lock this
+ * will bump the count of blocking holders and drop the
+ * spinlock.
+ */
+ if (p->locks[i] == BTRFS_READ_LOCK) {
+ btrfs_set_lock_blocking_read(p->nodes[i]);
+ p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+ } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+ btrfs_set_lock_blocking_write(p->nodes[i]);
+ p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+ }
+ }
+}
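The comment above captures the mechanism: a lock holder that may sleep bumps a
count of blocking holders and then drops the spinlock, so other tasks stop
spinning on it. A self-contained toy model of the read-side conversion, using
pthreads; struct toy_eb_lock and its fields are invented and do not match the
real extent_buffer layout:

	#include <pthread.h>
	#include <stdatomic.h>

	/* Toy stand-in for the extent_buffer lock state (illustrative only). */
	struct toy_eb_lock {
		pthread_rwlock_t lock;		/* the "spinning" lock */
		atomic_int blocking_readers;	/* read holders gone blocking */
		atomic_int blocking_writers;	/* write holders gone blocking */
	};

	/*
	 * Same shape as the BTRFS_READ_LOCK branch above: the caller must
	 * already hold the rwlock for reading; record that it is now a
	 * blocking reader, then release the rwlock so waiters no longer
	 * spin while the holder may sleep.
	 */
	static void toy_set_lock_blocking_read(struct toy_eb_lock *eb)
	{
		atomic_fetch_add(&eb->blocking_readers, 1);
		pthread_rwlock_unlock(&eb->lock);
	}

In the real locking.c, waiters then sleep until the blocking counts drop back
to zero before they can take the lock again.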
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index ab4020de25e7..98c92222eaf0 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -33,6 +33,8 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
#endif
+void btrfs_set_path_blocking(struct btrfs_path *p);
+
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)