author     Chris Mason <chris.mason@oracle.com>    2009-09-11 18:27:37 +0200
committer  Chris Mason <chris.mason@oracle.com>    2009-09-11 19:31:07 +0200
commit     a1ed835e1ab5795f91b198d08c43e2f56848dcf3
tree       ac3b370823fa76c5be7698e3663306badbbd622d    /fs/btrfs/extent_map.c
parent     Btrfs: Use PagePrivate2 to track pages in the data=ordered code.
Btrfs: Fix extent replacement race
Data COW means that whenever we write to a file, we replace any old
extent pointers with new ones. There was a window where a readpage
might find the old extent pointers on disk and cache them in the
extent_map tree in RAM while a given write was in the middle of
replacing them. Even though both the readpage and the write had their
respective bytes in the file locked, the extent_map entries inserted by
readpage may cover more bytes than it had locked down.
This commit closes the race by keeping the new extent pinned in the extent
map tree until after the on-disk btree is properly set up with the new
extent pointers.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
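
For context, the sketch below illustrates the lifecycle the commit message
describes: the write path inserts the new mapping with EXTENT_FLAG_PINNED set,
so a racing readpage cannot merge it away or drop it in favour of the stale
on-disk pointers, and only unpins it once the new file extent items are in the
btree. The caller shown is illustrative only; this page includes just the
fs/btrfs/extent_map.c hunk, and example_finish_write() is a made-up name.

/*
 * Illustrative sketch, not part of this diff: how a write-completion path
 * would be expected to use EXTENT_FLAG_PINNED together with the new
 * unpin_extent_cache() helper.
 */
static void example_finish_write(struct inode *inode, u64 start, u64 len,
				 u64 block_start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em = alloc_extent_map(GFP_NOFS);

	if (!em)
		return;

	em->start = start;
	em->len = len;
	em->block_start = block_start;
	em->block_len = len;
	/* pin the mapping so readpage cannot merge it away or replace it */
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	write_lock(&em_tree->lock);
	add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* drop our local reference; the tree holds its own */
	free_extent_map(em);

	/* ... write the data and insert the new file extent items ... */

	/* the btree now points at the new extent, so the cache can be unpinned */
	unpin_extent_cache(em_tree, start, len);
}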
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--    fs/btrfs/extent_map.c    50
1 file changed, 50 insertions, 0 deletions
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 72e9fa3c31f5..5bc7a0d325e7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -198,6 +198,56 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 	return 0;
 }
 
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+	int ret = 0;
+	struct extent_map *merge = NULL;
+	struct rb_node *rb;
+	struct extent_map *em;
+
+	write_lock(&tree->lock);
+	em = lookup_extent_mapping(tree, start, len);
+
+	WARN_ON(em->start != start || !em);
+
+	if (!em)
+		goto out;
+
+	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+	if (em->start != 0) {
+		rb = rb_prev(&em->rb_node);
+		if (rb)
+			merge = rb_entry(rb, struct extent_map, rb_node);
+		if (rb && mergable_maps(merge, em)) {
+			em->start = merge->start;
+			em->len += merge->len;
+			em->block_len += merge->block_len;
+			em->block_start = merge->block_start;
+			merge->in_tree = 0;
+			rb_erase(&merge->rb_node, &tree->map);
+			free_extent_map(merge);
+		}
+	}
+
+	rb = rb_next(&em->rb_node);
+	if (rb)
+		merge = rb_entry(rb, struct extent_map, rb_node);
+	if (rb && mergable_maps(em, merge)) {
+		em->len += merge->len;
+		em->block_len += merge->len;
+		rb_erase(&merge->rb_node, &tree->map);
+		merge->in_tree = 0;
+		free_extent_map(merge);
+	}
+
+	free_extent_map(em);
+out:
+	write_unlock(&tree->lock);
+	return ret;
+
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:	tree to insert new map in
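
One note on the merge logic repeated here: clearing EXTENT_FLAG_PINNED is not
enough on its own, because while the mapping was pinned, mergable_maps()
(visible in the hunk context above) will have declined to merge it with its
neighbours, so unpin_extent_cache() redoes the merging that
add_extent_mapping() would normally perform. Below is a simplified sketch of
that check, under the assumption that the real function also compares flags,
the block device, and hole/inline placement:

/*
 * Simplified illustration of the pinned-entry test in mergable_maps();
 * the adjacency checks are reduced to the common case of two ordinary
 * on-disk extents.
 */
static int mergable_maps_sketch(struct extent_map *prev, struct extent_map *next)
{
	/* a pinned mapping must not be merged away before unpin_extent_cache() */
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/* merge only ranges adjacent both in the file and on disk */
	if (prev->start + prev->len == next->start &&
	    prev->block_start + prev->block_len == next->block_start)
		return 1;

	return 0;
}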