author    Chris Mason <chris.mason@fusionio.com>  2012-07-23 21:25:05 +0200
committer Chris Mason <chris.mason@fusionio.com>  2012-07-23 21:36:18 +0200
commit    cbea5ac1ee03197354bd38caad3fcb798f185181
tree      3d6b62c1fd163107ecb08d6489cf0bc159e6df12  /fs/btrfs/locking.c
parent    Btrfs: don't wait around for new log writers on an SSD
Btrfs: reduce calls to wake_up on uncontended locks
The btrfs locks were unconditionally calling wake_up as the locks were
released.  This led to extra thrashing on the waitqueue, especially for
locks that were dominated by readers.

Signed-off-by: Chris Mason <chris.mason@fusionio.com>
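Below is a minimal sketch (not part of the patch) of the pattern the commit applies: only call wake_up() when the waitqueue may actually have a waiter on it. The struct and function names (demo_lock, demo_release) are hypothetical and exist only to illustrate the ordering requirement; the real change is in the diff further down.

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_lock {
	atomic_t blocking_writers;		/* writers currently blocked */
	wait_queue_head_t write_lock_wq;	/* where blocked writers sleep */
};

/* Release side: only wake when the count hits zero AND someone is waiting. */
static void demo_release(struct demo_lock *dl)
{
	/*
	 * atomic_dec_and_test() returns a value and therefore implies a full
	 * memory barrier, so the waitqueue_active() check cannot be reordered
	 * before the counter update.  A plain atomic_dec() implies no such
	 * barrier, which is why btrfs_tree_unlock() in the diff pairs it with
	 * an explicit smp_mb() before checking waitqueue_active().
	 */
	if (atomic_dec_and_test(&dl->blocking_writers) &&
	    waitqueue_active(&dl->write_lock_wq))
		wake_up(&dl->write_lock_wq);
}

The ordering matters because a waiter adds itself to the waitqueue and then re-checks the condition: the releaser must make its counter update visible before testing waitqueue_active(), or a wakeup can be lost.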
Diffstat (limited to 'fs/btrfs/locking.c')
 fs/btrfs/locking.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 272f911203ff..a44eff074805 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -78,13 +78,15 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		write_lock(&eb->lock);
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_inc(&eb->spinning_writers);
-		if (atomic_dec_and_test(&eb->blocking_writers))
+		if (atomic_dec_and_test(&eb->blocking_writers) &&
+		    waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
 	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
 		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 		read_lock(&eb->lock);
 		atomic_inc(&eb->spinning_readers);
-		if (atomic_dec_and_test(&eb->blocking_readers))
+		if (atomic_dec_and_test(&eb->blocking_readers) &&
+		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
 	}
 	return;
@@ -199,7 +201,8 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
-	if (atomic_dec_and_test(&eb->blocking_readers))
+	if (atomic_dec_and_test(&eb->blocking_readers) &&
+	    waitqueue_active(&eb->read_lock_wq))
 		wake_up(&eb->read_lock_wq);
 	atomic_dec(&eb->read_locks);
 }
@@ -247,8 +250,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	if (blockers) {
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_dec(&eb->blocking_writers);
-		smp_wmb();
-		wake_up(&eb->write_lock_wq);
+		smp_mb();
+		if (waitqueue_active(&eb->write_lock_wq))
+			wake_up(&eb->write_lock_wq);
 	} else {
 		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
 		atomic_dec(&eb->spinning_writers);