author		Miao Xie <miaox@cn.fujitsu.com>	2013-07-25 13:22:37 +0200
committer	Chris Mason <chris.mason@fusionio.com>	2013-09-01 14:04:36 +0200
commit		125bac016d60e78120e92904a5b2fc3a5ebf0475 (patch)
tree		d3120749dacb8646f8d2af851c390f79a3c86640 /fs/btrfs/extent_io.c
parent		Btrfs: batch the extent state operation when reading pages (diff)
Btrfs: cache the extent map struct when reading several pages
When we read several pages at once, we need not look up the extent map
object every time we deal with a page; we can cache it instead. This
reduces the extent map search time and also the lock contention on the
extent map tree.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
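What the patch implements is a one-slot, reference-counted lookup cache threaded through the readahead path: __do_readpage consults the cached extent_map before calling get_extent and reuses it whenever it still covers the requested offset. Below is a minimal userspace C sketch of the same pattern; struct map, map_lookup, map_put and get_map_cached are illustrative stand-ins rather than btrfs APIs, and the kernel version additionally checks em->in_tree and IS_ERR_OR_NULL before trusting a result.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct extent_map: a refcounted range. */
struct map {
	unsigned long start, len;
	int refs;
};

/* Stand-in for the real (expensive, lock-taking) tree search; returns
 * a map covering [start & ~16383, +16384) with one reference held. */
static struct map *map_lookup(unsigned long start)
{
	struct map *m = malloc(sizeof(*m));

	m->start = start & ~16383UL;
	m->len = 16384;
	m->refs = 1;
	return m;
}

static void map_put(struct map *m)
{
	if (m && --m->refs == 0)
		free(m);
}

/* One-slot cache, mirroring __get_extent_map: reuse *cached while it
 * covers 'start', otherwise drop it and do a fresh lookup. */
static struct map *get_map_cached(unsigned long start, struct map **cached)
{
	struct map *m;

	if (*cached) {
		m = *cached;
		if (start >= m->start && start < m->start + m->len) {
			m->refs++;	/* hit: hand the caller a new ref */
			return m;
		}
		map_put(m);		/* stale: drop the cache's ref */
		*cached = NULL;
	}

	m = map_lookup(start);		/* miss: expensive search */
	m->refs++;			/* the cache keeps its own ref */
	*cached = m;
	return m;
}

int main(void)
{
	struct map *cached = NULL;
	unsigned long off;

	for (off = 0; off < 65536; off += 4096) {
		struct map *m = get_map_cached(off, &cached);

		printf("offset %lu -> map [%lu, %lu)\n",
		       off, m->start, m->start + m->len);
		map_put(m);		/* caller drops its per-use ref */
	}
	map_put(cached);		/* finally drop the cache's ref */
	return 0;
}

Only every fourth iteration pays for a lookup here; the rest are served from the cache slot, which is exactly the saving the patch aims for on contiguous readahead.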
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	57
1 file changed, 46 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0d40d082f0c7..daf180dafe39 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2720,6 +2720,33 @@ void set_page_extent_mapped(struct page *page)
 	}
 }
 
+static struct extent_map *
+__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
+		 u64 start, u64 len, get_extent_t *get_extent,
+		 struct extent_map **em_cached)
+{
+	struct extent_map *em;
+
+	if (em_cached && *em_cached) {
+		em = *em_cached;
+		if (em->in_tree && start >= em->start &&
+		    start < extent_map_end(em)) {
+			atomic_inc(&em->refs);
+			return em;
+		}
+
+		free_extent_map(em);
+		*em_cached = NULL;
+	}
+
+	em = get_extent(inode, page, pg_offset, start, len, 0);
+	if (em_cached && !IS_ERR_OR_NULL(em)) {
+		BUG_ON(*em_cached);
+		atomic_inc(&em->refs);
+		*em_cached = em;
+	}
+	return em;
+}
 /*
  * basic readpage implementation. Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
@@ -2729,6 +2756,7 @@ void set_page_extent_mapped(struct page *page)
 static int __do_readpage(struct extent_io_tree *tree,
 			 struct page *page,
 			 get_extent_t *get_extent,
+			 struct extent_map **em_cached,
 			 struct bio **bio, int mirror_num,
 			 unsigned long *bio_flags, int rw)
 {
@@ -2793,8 +2821,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 					    &cached, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, pg_offset, cur,
-				end - cur + 1, 0);
+		em = __get_extent_map(inode, page, pg_offset, cur,
+				      end - cur + 1, get_extent, em_cached);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end);
@@ -2895,6 +2923,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 					     struct page *pages[], int nr_pages,
 					     u64 start, u64 end,
 					     get_extent_t *get_extent,
+					     struct extent_map **em_cached,
 					     struct bio **bio, int mirror_num,
 					     unsigned long *bio_flags, int rw)
 {
@@ -2915,8 +2944,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	}
 
 	for (index = 0; index < nr_pages; index++) {
-		__do_readpage(tree, pages[index], get_extent, bio, mirror_num,
-			      bio_flags, rw);
+		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
+			      mirror_num, bio_flags, rw);
 		page_cache_release(pages[index]);
 	}
 }
@@ -2924,6 +2953,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 static void __extent_readpages(struct extent_io_tree *tree,
 			       struct page *pages[],
 			       int nr_pages, get_extent_t *get_extent,
+			       struct extent_map **em_cached,
 			       struct bio **bio, int mirror_num,
 			       unsigned long *bio_flags, int rw)
 {
@@ -2944,8 +2974,9 @@ static void __extent_readpages(struct extent_io_tree *tree,
 		} else {
 			__do_contiguous_readpages(tree, &pages[first_index],
 						  index - first_index, start,
-						  end, get_extent, bio,
-						  mirror_num, bio_flags, rw);
+						  end, get_extent, em_cached,
+						  bio, mirror_num, bio_flags,
+						  rw);
 			start = page_start;
 			end = start + PAGE_CACHE_SIZE - 1;
 			first_index = index;
@@ -2955,7 +2986,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 	if (end)
 		__do_contiguous_readpages(tree, &pages[first_index],
 					  index - first_index, start,
-					  end, get_extent, bio,
+					  end, get_extent, em_cached, bio,
 					  mirror_num, bio_flags, rw);
 }
@@ -2981,8 +3012,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		btrfs_put_ordered_extent(ordered);
 	}
 
-	ret = __do_readpage(tree, page, get_extent, bio, mirror_num, bio_flags,
-			    rw);
+	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+			    bio_flags, rw);
 	return ret;
 }
@@ -3836,6 +3867,7 @@ int extent_readpages(struct extent_io_tree *tree,
 	unsigned long bio_flags = 0;
 	struct page *pagepool[16];
 	struct page *page;
+	struct extent_map *em_cached = NULL;
 	int nr = 0;
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
@@ -3852,14 +3884,17 @@ int extent_readpages(struct extent_io_tree *tree,
 		pagepool[nr++] = page;
 		if (nr < ARRAY_SIZE(pagepool))
 			continue;
-		__extent_readpages(tree, pagepool, nr, get_extent,
+		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
 				   &bio, 0, &bio_flags, READ);
 		nr = 0;
 	}
 	if (nr)
-		__extent_readpages(tree, pagepool, nr, get_extent,
+		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
 				   &bio, 0, &bio_flags, READ);
 
+	if (em_cached)
+		free_extent_map(em_cached);
+
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		return submit_one_bio(READ, bio, 0, bio_flags);
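Two details at the call sites above are worth noting. First, the single-page path (__extent_read_full_page) passes NULL for em_cached, so plain readpage behavior is unchanged; in terms of the illustrative sketch earlier, that corresponds to a NULL-tolerant wrapper along these lines:

/* Illustrative, continuing the sketch above: with no cache slot
 * supplied, fall back to a plain lookup, which is what
 * __get_extent_map does when em_cached is NULL. */
static struct map *get_map_maybe_cached(unsigned long start,
					struct map **cached)
{
	if (!cached)
		return map_lookup(start);
	return get_map_cached(start, cached);
}

Second, the final free_extent_map(em_cached) in extent_readpages drops the reference the cache itself holds (taken by the atomic_inc in __get_extent_map), mirroring map_put(cached) at the end of the sketch; without it the last cached extent map would leak.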