author | David Sterba <dsterba@suse.com> | 2016-04-26 23:54:39 +0200
committer | David Sterba <dsterba@suse.com> | 2016-04-29 13:48:14 +0200
commit | 210aa27768bec4297a9d6ad0e5cab45935c775e9
tree | 3b1e723a0f4dda6cfe820f20f8d193dc3b4c5100 /fs/btrfs/extent_io.c
parent | btrfs: make state preallocation more speculative in __set_extent_bit
btrfs: sink gfp parameter to convert_extent_bit
The single caller passes GFP_NOFS. We can get rid of the
gfpflags_allow_blocking checks, as GFP_NOFS can block but does not recurse
into the filesystem through reclaim.
Signed-off-by: David Sterba <dsterba@suse.com>
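
For context, the new prototype drops the gfp_t argument entirely, so the lone call site simply stops passing GFP_NOFS. A minimal sketch of the call-site change is below; the caller shown (btrfs_write_marked_extents() in fs/btrfs/transaction.c) and its exact arguments are an assumption for illustration, not part of this diff.

```c
/* New prototype after this patch: allocations are done internally with
 * GFP_NOFS, so callers no longer supply a gfp mask. */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

/* Hypothetical call-site update in the assumed single caller,
 * btrfs_write_marked_extents() in fs/btrfs/transaction.c: */
err = convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
			 mark, &cached_state);	/* GFP_NOFS argument dropped */
```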
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r-- | fs/btrfs/extent_io.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8707bcc615ff..c1139bcf8870 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1078,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * @bits:	the bits to set in this range
  * @clear_bits:	the bits to clear in this range
  * @cached_state:	state that we're going to cache
- * @mask:	the allocation mask
  *
  * This will go through and set bits for the given range. If any states exist
  * already in this range they are set with the given bit and cleared of the
  * clear_bits. This is only meant to be used by things that are mergeable, ie
  * converting from say DELALLOC to DIRTY. This is not meant to be used with
  * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask)
+		       struct extent_state **cached_state)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -1103,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
-	if (!prealloc && gfpflags_allow_blocking(mask)) {
+	if (!prealloc) {
 		/*
 		 * Best effort, don't worry if extent state allocation fails
 		 * here for the first iteration. We might have a cached state
@@ -1111,7 +1112,7 @@ again:
 		 * extent state allocations are needed. We'll only know this
 		 * after locking the tree.
 		 */
-		prealloc = alloc_extent_state(mask);
+		prealloc = alloc_extent_state(GFP_NOFS);
 		if (!prealloc && !first_iteration)
 			return -ENOMEM;
 	}
@@ -1272,8 +1273,7 @@ search_again:
 	if (start > end)
 		goto out;
 	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
+	cond_resched();
 	first_iteration = false;
 	goto again;
```
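
The removed checks were effectively no-ops for a GFP_NOFS caller: gfpflags_allow_blocking() only tests __GFP_DIRECT_RECLAIM, and GFP_NOFS includes it (GFP_NOFS merely omits __GFP_FS, which is what prevents recursion back into the filesystem during reclaim). The standalone sketch below illustrates this reasoning; the bit values are placeholders for illustration, not the kernel's actual ones.

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Placeholder bit values, for illustration only. */
#define __GFP_DIRECT_RECLAIM	((gfp_t)(1u << 0))	/* caller may sleep in direct reclaim */
#define __GFP_KSWAPD_RECLAIM	((gfp_t)(1u << 1))	/* may wake kswapd */
#define __GFP_IO		((gfp_t)(1u << 2))	/* may start physical I/O */
#define __GFP_FS		((gfp_t)(1u << 3))	/* may call back into the filesystem */

#define __GFP_RECLAIM	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)	/* no __GFP_FS: no fs recursion */
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)

/* Mirrors the kernel helper: blocking is allowed iff direct reclaim is allowed. */
static bool gfpflags_allow_blocking(gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
	/* Always true for GFP_NOFS, which is why dropping the checks changes
	 * nothing for the single caller: the extent state preallocation and
	 * cond_resched() paths ran unconditionally anyway. */
	printf("GFP_NOFS allows blocking: %d\n", gfpflags_allow_blocking(GFP_NOFS));
	return 0;
}
```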