author     Josef Bacik <josef@redhat.com>          2010-02-02 21:51:14 +0100
committer  Chris Mason <chris.mason@oracle.com>    2010-03-15 16:00:13 +0100
commit     5a1a3df1f6c86926cfe8657e6f9b4b4c2f467d60 (patch)
tree       bc73836bf0bde137da48eaff72dae75ee729aaba /fs/btrfs/ordered-data.c
parent     Btrfs: cache extent state in find_delalloc_range (diff)
Btrfs: cache ordered extent when completing io
When finishing io we run btrfs_dec_test_ordered_pending and then immediately run btrfs_lookup_ordered_extent, but btrfs_dec_test_ordered_pending already does that lookup, so we end up searching twice when we don't have to.

This patch lets us pass a btrfs_ordered_extent in to btrfs_dec_test_ordered_pending, so if we do complete io on that ordered extent we can just use the entry we found there instead of doing another btrfs_lookup_ordered_extent. This took my fio job with the other patch from 24 MB/s to 29 MB/s.

Signed-off-by: Josef Bacik <josef@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
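To illustrate what the new 'cached' argument buys us on the io-completion side, here is a minimal, hypothetical caller sketch. example_finish_ordered_io() and its body are illustrative only (the real caller changes live in fs/btrfs/inode.c, which is outside this diffstat); btrfs_dec_test_ordered_pending() and btrfs_put_ordered_extent() are the existing helpers from this file.

/*
 * Hypothetical caller-side sketch (assumes the usual btrfs includes such as
 * "ctree.h", "btrfs_inode.h" and "ordered-data.h"): reuse the ordered extent
 * handed back via 'cached' instead of calling btrfs_lookup_ordered_extent()
 * a second time.
 */
static int example_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered = NULL;
	int ret;

	/* Returns 1 only the first time the whole ordered extent completes. */
	ret = btrfs_dec_test_ordered_pending(inode, &ordered, start,
					     end - start + 1);
	if (!ret)
		return 0;

	/* 'ordered' was filled in under tree->lock; no second rbtree search. */
	BUG_ON(!ordered);

	/* ... finish the ordered extent using 'ordered' ... */

	/* Drop the reference taken for us in the out: path shown below. */
	btrfs_put_ordered_extent(ordered);
	return 0;
}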
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--   fs/btrfs/ordered-data.c   7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d56f732ba95e..a8ffecd0b491 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -232,11 +232,12 @@ int btrfs_add_ordered_sum(struct inode *inode,
  * to make sure this function only returns 1 once for a given ordered extent.
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
+				   struct btrfs_ordered_extent **cached,
 				   u64 file_offset, u64 io_size)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
-	struct btrfs_ordered_extent *entry;
+	struct btrfs_ordered_extent *entry = NULL;
 	int ret;

 	tree = &BTRFS_I(inode)->ordered_tree;
@@ -264,6 +265,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	else
 		ret = 1;
 out:
+	if (!ret && cached && entry) {
+		*cached = entry;
+		atomic_inc(&entry->refs);
+	}
 	spin_unlock(&tree->lock);
 	return ret == 0;
 }
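One design detail worth noting in the new out: block (my reading of the hunk): the extra reference is taken with atomic_inc(&entry->refs) while tree->lock is still held, so the entry handed back through *cached cannot go away before the caller sees it. The caller then owns that reference and is expected to release it with btrfs_put_ordered_extent() once it has finished with the ordered extent, as in the sketch above.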