author     Linus Torvalds <torvalds@linux-foundation.org>   2022-08-05 05:13:46 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-08-05 05:13:46 +0200
commit     9daee913dc8d15eb65e0ff560803ab1c28bb480b (patch)
tree       389d943ccf6c280176365e3b09f9e87a105f0733 /fs/ext2
parent     Merge tag 'for-5.20/block-2022-08-04' of git://git.kernel.dk/linux-block (diff)
parent     ext4: add ioctls to get/set the ext4 superblock uuid (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
"Add new ioctls to set and get the file system UUID in the ext4
superblock and improved the performance of the online resizing of file
systems with bigalloc enabled.
Fixed a lot of bugs, in particular for the inline data feature,
potential races when creating and deleting inodes with shared extended
attribute blocks, and the handling of directory blocks which are
corrupted"
* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (37 commits)
ext4: add ioctls to get/set the ext4 superblock uuid
ext4: avoid resizing to a partial cluster size
ext4: reduce computation of overhead during resize
jbd2: fix assertion 'jh->b_frozen_data == NULL' failure when journal aborted
ext4: block range must be validated before use in ext4_mb_clear_bb()
mbcache: automatically delete entries from cache on freeing
mbcache: Remove mb_cache_entry_delete()
ext2: avoid deleting xattr block that is being reused
ext2: unindent codeblock in ext2_xattr_set()
ext2: factor out freeing of xattr block reference
ext4: fix race when reusing xattr blocks
ext4: unindent codeblock in ext4_xattr_block_set()
ext4: remove EA inode entry from mbcache on inode eviction
mbcache: add functions to delete entry if unused
mbcache: don't reclaim used entries
ext4: make sure ext4_append() always allocates new block
ext4: check if directory block is within i_size
ext4: reflect mb_optimize_scan value in options file
ext4: avoid remove directory when directory is corrupted
ext4: aligned '*' in comments
...
Diffstat (limited to 'fs/ext2')
-rw-r--r--  fs/ext2/xattr.c | 170
1 file changed, 78 insertions(+), 92 deletions(-)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 841fa6d9d744..641abfa4b718 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -517,36 +517,36 @@ bad_block:
 	/* Here we know that we can set the new attribute. */
 	if (header) {
-		/* assert(header == HDR(bh)); */
+		int offset;
+
 		lock_buffer(bh);
 		if (header->h_refcount == cpu_to_le32(1)) {
 			__u32 hash = le32_to_cpu(header->h_hash);
+			struct mb_cache_entry *oe;
 
-			ea_bdebug(bh, "modifying in-place");
+			oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
+					hash, bh->b_blocknr);
+			if (!oe) {
+				ea_bdebug(bh, "modifying in-place");
+				goto update_block;
+			}
 			/*
-			 * This must happen under buffer lock for
-			 * ext2_xattr_set2() to reliably detect modified block
+			 * Someone is trying to reuse the block, leave it alone
 			 */
-			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
-					      bh->b_blocknr);
-
-			/* keep the buffer locked while modifying it. */
-		} else {
-			int offset;
-
-			unlock_buffer(bh);
-			ea_bdebug(bh, "cloning");
-			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
-			error = -ENOMEM;
-			if (header == NULL)
-				goto cleanup;
-			header->h_refcount = cpu_to_le32(1);
-
-			offset = (char *)here - bh->b_data;
-			here = ENTRY((char *)header + offset);
-			offset = (char *)last - bh->b_data;
-			last = ENTRY((char *)header + offset);
+			mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
 		}
+		unlock_buffer(bh);
+		ea_bdebug(bh, "cloning");
+		header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
+		error = -ENOMEM;
+		if (header == NULL)
+			goto cleanup;
+		header->h_refcount = cpu_to_le32(1);
+
+		offset = (char *)here - bh->b_data;
+		here = ENTRY((char *)header + offset);
+		offset = (char *)last - bh->b_data;
+		last = ENTRY((char *)header + offset);
 	} else {
 		/* Allocate a buffer where we construct the new block. */
 		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
@@ -559,6 +559,7 @@ bad_block:
 		last = here = ENTRY(header+1);
 	}
 
+update_block:
 	/* Iff we are modifying the block in-place, bh is locked here. */
 
 	if (not_found) {
@@ -651,6 +652,55 @@ cleanup:
 	return error;
 }
 
+static void ext2_xattr_release_block(struct inode *inode,
+				     struct buffer_head *bh)
+{
+	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
+
+retry_ref:
+	lock_buffer(bh);
+	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
+		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+		struct mb_cache_entry *oe;
+
+		/*
+		 * This must happen under buffer lock to properly
+		 * serialize with ext2_xattr_set() reusing the block.
+		 */
+		oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+						  bh->b_blocknr);
+		if (oe) {
+			/*
+			 * Someone is trying to reuse the block. Wait
+			 * and retry.
+			 */
+			unlock_buffer(bh);
+			mb_cache_entry_wait_unused(oe);
+			mb_cache_entry_put(ea_block_cache, oe);
+			goto retry_ref;
+		}
+
+		/* Free the old block. */
+		ea_bdebug(bh, "freeing");
+		ext2_free_blocks(inode, bh->b_blocknr, 1);
+		/* We let our caller release bh, so we
+		 * need to duplicate the buffer before. */
+		get_bh(bh);
+		bforget(bh);
+		unlock_buffer(bh);
+	} else {
+		/* Decrement the refcount only. */
+		le32_add_cpu(&HDR(bh)->h_refcount, -1);
+		dquot_free_block(inode, 1);
+		mark_buffer_dirty(bh);
+		unlock_buffer(bh);
+		ea_bdebug(bh, "refcount now=%d",
+			  le32_to_cpu(HDR(bh)->h_refcount));
+		if (IS_SYNC(inode))
+			sync_dirty_buffer(bh);
+	}
+}
+
 /*
  * Second half of ext2_xattr_set(): Update the file system.
  */
@@ -747,34 +797,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 		 * If there was an old block and we are no longer using it,
 		 * release the old block.
 		 */
-		lock_buffer(old_bh);
-		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
-			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);
-
-			/*
-			 * This must happen under buffer lock for
-			 * ext2_xattr_set2() to reliably detect freed block
-			 */
-			mb_cache_entry_delete(ea_block_cache, hash,
-					      old_bh->b_blocknr);
-			/* Free the old block. */
-			ea_bdebug(old_bh, "freeing");
-			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
-			mark_inode_dirty(inode);
-			/* We let our caller release old_bh, so we
-			 * need to duplicate the buffer before. */
-			get_bh(old_bh);
-			bforget(old_bh);
-		} else {
-			/* Decrement the refcount only. */
-			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
-			dquot_free_block_nodirty(inode, 1);
-			mark_inode_dirty(inode);
-			mark_buffer_dirty(old_bh);
-			ea_bdebug(old_bh, "refcount now=%d",
-				  le32_to_cpu(HDR(old_bh)->h_refcount));
-		}
-		unlock_buffer(old_bh);
+		ext2_xattr_release_block(inode, old_bh);
 	}
 
 cleanup:
@@ -828,30 +851,7 @@ ext2_xattr_delete_inode(struct inode *inode)
 			   EXT2_I(inode)->i_file_acl);
 		goto cleanup;
 	}
-	lock_buffer(bh);
-	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
-		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
-
-		/*
-		 * This must happen under buffer lock for ext2_xattr_set2() to
-		 * reliably detect freed block
-		 */
-		mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
-				      bh->b_blocknr);
-		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
-		get_bh(bh);
-		bforget(bh);
-		unlock_buffer(bh);
-	} else {
-		le32_add_cpu(&HDR(bh)->h_refcount, -1);
-		ea_bdebug(bh, "refcount now=%d",
-			  le32_to_cpu(HDR(bh)->h_refcount));
-		unlock_buffer(bh);
-		mark_buffer_dirty(bh);
-		if (IS_SYNC(inode))
-			sync_dirty_buffer(bh);
-		dquot_free_block_nodirty(inode, 1);
-	}
+	ext2_xattr_release_block(inode, bh);
 
 	EXT2_I(inode)->i_file_acl = 0;
 
 cleanup:
@@ -943,7 +943,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 	if (!header->h_hash)
 		return NULL;	/* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
+
 	ce = mb_cache_entry_find_first(ea_block_cache, hash);
 	while (ce) {
 		struct buffer_head *bh;
@@ -955,22 +955,8 @@ again:
 				inode->i_ino, (unsigned long) ce->e_value);
 		} else {
 			lock_buffer(bh);
-			/*
-			 * We have to be careful about races with freeing or
-			 * rehashing of xattr block. Once we hold buffer lock
-			 * xattr block's state is stable so we can check
-			 * whether the block got freed / rehashed or not.
-			 * Since we unhash mbcache entry under buffer lock when
-			 * freeing / rehashing xattr block, checking whether
-			 * entry is still hashed is reliable.
-			 */
-			if (hlist_bl_unhashed(&ce->e_hash_list)) {
-				mb_cache_entry_put(ea_block_cache, ce);
-				unlock_buffer(bh);
-				brelse(bh);
-				goto again;
-			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
-				   EXT2_XATTR_REFCOUNT_MAX) {
+			if (le32_to_cpu(HDR(bh)->h_refcount) >
+			    EXT2_XATTR_REFCOUNT_MAX) {
 				ea_idebug(inode, "block %ld refcount %d>%d",
 					  (unsigned long) ce->e_value,
 					  le32_to_cpu(HDR(bh)->h_refcount),
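The ext2 change above and its ext4 counterpart close the same race: one task freeing a shared xattr block whose refcount is about to reach zero, while another task has found that block in mbcache and is about to re-point a new inode at it. Both sides now serialize on the buffer lock plus the two mbcache primitives introduced earlier in this series. Below is a condensed sketch of the protocol, distilled from the diff above; it is illustrative pseudocode in kernel style rather than the literal kernel code, with quota handling, dirtying, and error paths omitted and the function names free_side()/reuse_side() invented for the illustration.

/* Sketch of the freeing side (compare ext2_xattr_release_block() above). */
static void free_side(struct inode *inode, struct buffer_head *bh)
{
	struct mb_cache *cache = EA_BLOCK_CACHE(inode);
	struct mb_cache_entry *oe;

retry:
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		/* Unhash the entry under the buffer lock; if a would-be
		 * reuser still holds a reference, we get the entry back. */
		oe = mb_cache_entry_delete_or_get(cache,
				le32_to_cpu(HDR(bh)->h_hash), bh->b_blocknr);
		if (oe) {
			unlock_buffer(bh);
			mb_cache_entry_wait_unused(oe);	/* reuser finished */
			mb_cache_entry_put(cache, oe);
			goto retry;	/* refcount may have changed */
		}
		/* Nobody can find the block via the cache any more. */
		ext2_free_blocks(inode, bh->b_blocknr, 1);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);	/* just drop a ref */
	}
	unlock_buffer(bh);
}

/* Sketch of the reusing side (compare ext2_xattr_cache_find() plus the
 * refcount bump in ext2_xattr_set2(), both not fully shown in the diff). */
static void reuse_side(struct super_block *sb, struct mb_cache *cache,
		       u32 hash)
{
	/* find_first() returns the entry with a reference held; that
	 * reference is what makes the freeing side wait instead of
	 * freeing the block out from under us. */
	struct mb_cache_entry *ce = mb_cache_entry_find_first(cache, hash);
	struct buffer_head *bh = sb_bread(sb, ce->e_value);

	lock_buffer(bh);
	le32_add_cpu(&HDR(bh)->h_refcount, 1);	/* block is now pinned */
	unlock_buffer(bh);
	/* Dropping our reference wakes anyone waiting in
	 * mb_cache_entry_wait_unused(). */
	mb_cache_entry_put(cache, ce);
}

This also explains the hunk that deletes the "again:" loop in ext2_xattr_cache_find(): once the reuser holds a cache reference, the entry can no longer be unhashed and freed underneath it, so the hlist_bl_unhashed() recheck and retry become unnecessary.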