author     Qu Wenruo <wqu@suse.com>          2019-04-15 15:15:24 +0200
committer  David Sterba <dsterba@suse.com>   2019-04-29 19:02:43 +0200
commit     34e73cc930a8677426c9cbffdd3421e18f32e79f (patch)
tree       fa697652fe3dc42fc3e4afceeb047355c8710e5e /fs/btrfs/locking.c
parent     Btrfs: remove no longer used member num_dirty_bgs from transaction (diff)
download   linux-34e73cc930a8677426c9cbffdd3421e18f32e79f.tar.xz
           linux-34e73cc930a8677426c9cbffdd3421e18f32e79f.zip
btrfs: trace: Introduce trace events for sleepable tree lock
There are two tree lock operations that can sleep:
- btrfs_tree_read_lock()
- btrfs_tree_lock()

Sometimes we need to look into the concurrency picture of the filesystem. For that we need the execution time of the above two functions and the owner of @eb.

Here we introduce trace events for user space tools like bcc, to get the execution time of the above two functions and detailed owner info where eBPF code can't.

All the overhead is hidden behind the trace events, so if the events are not enabled there is no overhead.

The trace events also output bytenr and generation, allowing them to be paired with the unlock events to pin down deadlocks.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
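The event class itself lives in include/trace/events/btrfs.h, which is outside this diff (limited to fs/btrfs/locking.c). The following is a minimal sketch of what such a sleepable-lock event class could look like, not the exact definition added by this commit; the class name btrfs_sleepable_tree_lock and the field names are illustrative assumptions. It records the bytenr (eb->start), generation and owner of the extent buffer, plus the start and end timestamps used to compute the lock wait time:

/*
 * Sketch only -- illustrative, not this commit's exact definition.
 * Would live in include/trace/events/btrfs.h.
 */
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(btrfs_sleepable_tree_lock,

	TP_PROTO(struct extent_buffer *eb, u64 start_ns),

	TP_ARGS(eb, start_ns),

	TP_STRUCT__entry(
		__field(u64, block)       /* bytenr of the extent buffer */
		__field(u64, generation)
		__field(u64, owner)       /* tree that owns @eb */
		__field(u64, start_ns)    /* sampled before the lock attempt */
		__field(u64, end_ns)      /* sampled once the lock is acquired */
	),

	TP_fast_assign(
		__entry->block      = eb->start;
		__entry->generation = btrfs_header_generation(eb);
		__entry->owner      = btrfs_header_owner(eb);
		__entry->start_ns   = start_ns;
		__entry->end_ns     = ktime_get_ns();
	),

	TP_printk("block=%llu generation=%llu owner=%llu diff_ns=%llu",
		  __entry->block, __entry->generation, __entry->owner,
		  __entry->end_ns - __entry->start_ns)
);

DEFINE_EVENT(btrfs_sleepable_tree_lock, btrfs_tree_read_lock,
	TP_PROTO(struct extent_buffer *eb, u64 start_ns),
	TP_ARGS(eb, start_ns)
);

DEFINE_EVENT(btrfs_sleepable_tree_lock, btrfs_tree_lock,
	TP_PROTO(struct extent_buffer *eb, u64 start_ns),
	TP_ARGS(eb, start_ns)
);

Defining the events also generates the trace_btrfs_tree_read_lock_enabled() and trace_btrfs_tree_lock_enabled() helpers used in the diff below, so ktime_get_ns() is only called when the events are actually enabled; with tracing off the check reduces to a static-key branch.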
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--   fs/btrfs/locking.c   12
1 file changed, 12 insertions, 0 deletions
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6df03ba36026..67b77f1d113e 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -158,6 +158,10 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
*/
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
+ u64 start_ns = 0;
+
+ if (trace_btrfs_tree_read_lock_enabled())
+ start_ns = ktime_get_ns();
again:
BUG_ON(!atomic_read(&eb->blocking_writers) &&
current->pid == eb->lock_owner);
@@ -174,6 +178,7 @@ again:
BUG_ON(eb->lock_nested);
eb->lock_nested = true;
read_unlock(&eb->lock);
+ trace_btrfs_tree_read_lock(eb, start_ns);
return;
}
if (atomic_read(&eb->blocking_writers)) {
@@ -184,6 +189,7 @@ again:
}
btrfs_assert_tree_read_locks_get(eb);
btrfs_assert_spinning_readers_get(eb);
+ trace_btrfs_tree_read_lock(eb, start_ns);
}
/*
@@ -299,6 +305,11 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
*/
void btrfs_tree_lock(struct extent_buffer *eb)
{
+ u64 start_ns = 0;
+
+ if (trace_btrfs_tree_lock_enabled())
+ start_ns = ktime_get_ns();
+
WARN_ON(eb->lock_owner == current->pid);
again:
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -312,6 +323,7 @@ again:
btrfs_assert_spinning_writers_get(eb);
btrfs_assert_tree_write_locks_get(eb);
eb->lock_owner = current->pid;
+ trace_btrfs_tree_lock(eb, start_ns);
}
/*