Diffstat (limited to 'fs')
-rw-r--r--  fs/block_dev.c          | 86
-rw-r--r--  fs/btrfs/backref.c      |  2
-rw-r--r--  fs/btrfs/send.c         | 77
-rw-r--r--  fs/btrfs/transaction.c  | 32
-rw-r--r--  fs/btrfs/transaction.h  |  3
-rw-r--r--  fs/compat_ioctl.c       |  3
-rw-r--r--  fs/coredump.c           | 44
-rw-r--r--  fs/dax.c                |  2
-rw-r--r--  fs/f2fs/file.c          | 63
-rw-r--r--  fs/f2fs/gc.c            | 70
-rw-r--r--  fs/f2fs/super.c         | 48
-rw-r--r--  fs/gfs2/bmap.c          | 15
-rw-r--r--  fs/io_uring.c           |  3
-rw-r--r--  fs/ocfs2/xattr.c        |  3
-rw-r--r--  fs/super.c              |  5
-rw-r--r--  fs/xfs/scrub/dabtree.c  |  6
-rw-r--r--  fs/xfs/xfs_itable.c     |  3
17 files changed, 251 insertions(+), 214 deletions(-)
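
The fs/block_dev.c hunks below export bd_start_claiming() and factor the claim finish/abort paths out into bd_finish_claiming() and bd_abort_claiming(), so a driver can hold off other exclusive openers while it runs its own setup. A minimal sketch of the intended calling pattern, under the assumption of a hypothetical driver setup step (example_setup() is not part of this patch):

static int example_claim_bdev(struct block_device *bdev, void *holder)
{
	struct block_device *whole;
	int err;

	/* Block other exclusive openers while our setup runs. */
	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	err = example_setup(bdev);	/* hypothetical driver setup step */
	if (err) {
		/* Setup failed: wake waiters without taking the claim. */
		bd_abort_claiming(bdev, whole, holder);
		return err;
	}

	/* Mark @bdev exclusively open by @holder and wake waiters. */
	bd_finish_claiming(bdev, whole, holder);
	return 0;
}
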
diff --git a/fs/block_dev.c b/fs/block_dev.c index c2a85b587922..a6f7c892cb4a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -439,6 +439,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) ret = -EAGAIN; goto error; } + ret = dio->size; if (polled) WRITE_ONCE(iocb->ki_cookie, qc); @@ -465,7 +466,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) ret = -EAGAIN; goto error; } - ret += bio->bi_iter.bi_size; + ret = dio->size; bio = bio_alloc(gfp, nr_pages); if (!bio) { @@ -1181,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) * Pointer to the block device containing @bdev on success, ERR_PTR() * value on failure. */ -static struct block_device *bd_start_claiming(struct block_device *bdev, - void *holder) +struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) { struct gendisk *disk; struct block_device *whole; @@ -1229,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, return ERR_PTR(err); } } +EXPORT_SYMBOL(bd_start_claiming); + +static void bd_clear_claiming(struct block_device *whole, void *holder) +{ + lockdep_assert_held(&bdev_lock); + /* tell others that we're done */ + BUG_ON(whole->bd_claiming != holder); + whole->bd_claiming = NULL; + wake_up_bit(&whole->bd_claiming, 0); +} + +/** + * bd_finish_claiming - finish claiming of a block device + * @bdev: block device of interest + * @whole: whole block device (returned from bd_start_claiming()) + * @holder: holder that has claimed @bdev + * + * Finish exclusive open of a block device. Mark the device as exlusively + * open by the holder and wake up all waiters for exclusive open to finish. + */ +void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, + void *holder) +{ + spin_lock(&bdev_lock); + BUG_ON(!bd_may_claim(bdev, whole, holder)); + /* + * Note that for a whole device bd_holders will be incremented twice, + * and bd_holder will be set to bd_may_claim before being set to holder + */ + whole->bd_holders++; + whole->bd_holder = bd_may_claim; + bdev->bd_holders++; + bdev->bd_holder = holder; + bd_clear_claiming(whole, holder); + spin_unlock(&bdev_lock); +} +EXPORT_SYMBOL(bd_finish_claiming); + +/** + * bd_abort_claiming - abort claiming of a block device + * @bdev: block device of interest + * @whole: whole block device (returned from bd_start_claiming()) + * @holder: holder that has claimed @bdev + * + * Abort claiming of a block device when the exclusive open failed. This can be + * also used when exclusive open is not actually desired and we just needed + * to block other exclusive openers for a while. 
+ */ +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder) +{ + spin_lock(&bdev_lock); + bd_clear_claiming(whole, holder); + spin_unlock(&bdev_lock); +} +EXPORT_SYMBOL(bd_abort_claiming); #ifdef CONFIG_SYSFS struct bd_holder_disk { @@ -1698,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) /* finish claiming */ mutex_lock(&bdev->bd_mutex); - spin_lock(&bdev_lock); - - if (!res) { - BUG_ON(!bd_may_claim(bdev, whole, holder)); - /* - * Note that for a whole device bd_holders - * will be incremented twice, and bd_holder - * will be set to bd_may_claim before being - * set to holder - */ - whole->bd_holders++; - whole->bd_holder = bd_may_claim; - bdev->bd_holders++; - bdev->bd_holder = holder; - } - - /* tell others that we're done */ - BUG_ON(whole->bd_claiming != holder); - whole->bd_claiming = NULL; - wake_up_bit(&whole->bd_claiming, 0); - - spin_unlock(&bdev_lock); - + bd_finish_claiming(bdev, whole, holder); /* * Block event polling for write claims if requested. Any * write holder makes the write_holder state stick until diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 89116afda7a2..e5d85311d5d5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, ulist_init(roots); ulist_init(tmp); - trans = btrfs_attach_transaction(root); + trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { ret = PTR_ERR(trans); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 69b59bf75882..c3c0c064c25d 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx, { int ret = 0; - if (sctx->cur_ino != sctx->cmp_key->objectid) { - - if (result == BTRFS_COMPARE_TREE_CHANGED) { - struct extent_buffer *leaf_l; - struct extent_buffer *leaf_r; - struct btrfs_file_extent_item *ei_l; - struct btrfs_file_extent_item *ei_r; - - leaf_l = sctx->left_path->nodes[0]; - leaf_r = sctx->right_path->nodes[0]; - ei_l = btrfs_item_ptr(leaf_l, - sctx->left_path->slots[0], - struct btrfs_file_extent_item); - ei_r = btrfs_item_ptr(leaf_r, - sctx->right_path->slots[0], - struct btrfs_file_extent_item); - - /* - * We may have found an extent item that has changed - * only its disk_bytenr field and the corresponding - * inode item was not updated. This case happens due to - * very specific timings during relocation when a leaf - * that contains file extent items is COWed while - * relocation is ongoing and its in the stage where it - * updates data pointers. So when this happens we can - * safely ignore it since we know it's the same extent, - * but just at different logical and physical locations - * (when an extent is fully replaced with a new one, we - * know the generation number must have changed too, - * since snapshot creation implies committing the current - * transaction, and the inode item must have been updated - * as well). - * This replacement of the disk_bytenr happens at - * relocation.c:replace_file_extents() through - * relocation.c:btrfs_reloc_cow_block(). 
- */ - if (btrfs_file_extent_generation(leaf_l, ei_l) == - btrfs_file_extent_generation(leaf_r, ei_r) && - btrfs_file_extent_ram_bytes(leaf_l, ei_l) == - btrfs_file_extent_ram_bytes(leaf_r, ei_r) && - btrfs_file_extent_compression(leaf_l, ei_l) == - btrfs_file_extent_compression(leaf_r, ei_r) && - btrfs_file_extent_encryption(leaf_l, ei_l) == - btrfs_file_extent_encryption(leaf_r, ei_r) && - btrfs_file_extent_other_encoding(leaf_l, ei_l) == - btrfs_file_extent_other_encoding(leaf_r, ei_r) && - btrfs_file_extent_type(leaf_l, ei_l) == - btrfs_file_extent_type(leaf_r, ei_r) && - btrfs_file_extent_disk_bytenr(leaf_l, ei_l) != - btrfs_file_extent_disk_bytenr(leaf_r, ei_r) && - btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) == - btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) && - btrfs_file_extent_offset(leaf_l, ei_l) == - btrfs_file_extent_offset(leaf_r, ei_r) && - btrfs_file_extent_num_bytes(leaf_l, ei_l) == - btrfs_file_extent_num_bytes(leaf_r, ei_r)) - return 0; - } - - inconsistent_snapshot_error(sctx, result, "extent"); - return -EIO; - } + /* + * We have found an extent item that changed without the inode item + * having changed. This can happen either after relocation (where the + * disk_bytenr of an extent item is replaced at + * relocation.c:replace_file_extents()) or after deduplication into a + * file in both the parent and send snapshots (where an extent item can + * get modified or replaced with a new one). Note that deduplication + * updates the inode item, but it only changes the iversion (sequence + * field in the inode item) of the inode, so if a file is deduplicated + * the same amount of times in both the parent and send snapshots, its + * iversion becames the same in both snapshots, whence the inode item is + * the same on both snapshots. + */ + if (sctx->cur_ino != sctx->cmp_key->objectid) + return 0; if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result != BTRFS_COMPARE_TREE_DELETED) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3b8ae1a8f02d..e3adb714c04b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | - __TRANS_JOIN), + __TRANS_JOIN | + __TRANS_JOIN_NOSTART), [TRANS_STATE_UNBLOCKED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), [TRANS_STATE_COMPLETED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), }; void btrfs_put_transaction(struct btrfs_transaction *transaction) @@ -543,7 +546,8 @@ again: ret = join_transaction(fs_info, type); if (ret == -EBUSY) { wait_current_trans(fs_info); - if (unlikely(type == TRANS_ATTACH)) + if (unlikely(type == TRANS_ATTACH || + type == TRANS_JOIN_NOSTART)) ret = -ENOENT; } } while (ret == -EBUSY); @@ -660,6 +664,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root } /* + * Similar to regular join but it never starts a transaction when none is + * running or after waiting for the current one to finish. 
+ */ +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) +{ + return start_transaction(root, 0, TRANS_JOIN_NOSTART, + BTRFS_RESERVE_NO_FLUSH, true); +} + +/* * btrfs_attach_transaction() - catch the running transaction * * It is used when we want to commit the current the transaction, but @@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) } } else { spin_unlock(&fs_info->trans_lock); + /* + * The previous transaction was aborted and was already removed + * from the list of transactions at fs_info->trans_list. So we + * abort to prevent writing a new superblock that reflects a + * corrupt state (pointing to trees with unwritten nodes/leafs). + */ + if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) { + ret = -EROFS; + goto cleanup_transaction; + } } extwriter_counter_dec(cur_trans, trans->type); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 527ea94b57d9..2c5a6f6e5bb0 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -94,11 +94,13 @@ struct btrfs_transaction { #define __TRANS_JOIN (1U << 11) #define __TRANS_JOIN_NOLOCK (1U << 12) #define __TRANS_DUMMY (1U << 13) +#define __TRANS_JOIN_NOSTART (1U << 14) #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE) #define TRANS_ATTACH (__TRANS_ATTACH) #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE) #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK) +#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART) #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH) @@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( int min_factor); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction_barrier( struct btrfs_root *root); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 6e30949d9f77..a7ec2d3dff92 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -638,9 +638,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN) COMPATIBLE_IOCTL(PPPIOCATTCHAN) COMPATIBLE_IOCTL(PPPIOCGCHAN) COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS) -/* PPPOX */ -COMPATIBLE_IOCTL(PPPOEIOCSFWD) -COMPATIBLE_IOCTL(PPPOEIOCDFWD) /* Big A */ /* sparc only */ /* Big Q for sound/OSS */ diff --git a/fs/coredump.c b/fs/coredump.c index e42e17e55bfd..b1ea7dfbd149 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -7,6 +7,7 @@ #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/swap.h> +#include <linux/ctype.h> #include <linux/string.h> #include <linux/init.h> #include <linux/pagemap.h> @@ -187,11 +188,13 @@ put_exe_file: * name into corename, which must have space for at least * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 
*/ -static int format_corename(struct core_name *cn, struct coredump_params *cprm) +static int format_corename(struct core_name *cn, struct coredump_params *cprm, + size_t **argv, int *argc) { const struct cred *cred = current_cred(); const char *pat_ptr = core_pattern; int ispipe = (*pat_ptr == '|'); + bool was_space = false; int pid_in_pattern = 0; int err = 0; @@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) return -ENOMEM; cn->corename[0] = '\0'; - if (ispipe) + if (ispipe) { + int argvs = sizeof(core_pattern) / 2; + (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL); + if (!(*argv)) + return -ENOMEM; + (*argv)[(*argc)++] = 0; ++pat_ptr; + } /* Repeat as long as we have more pattern to process and more output space */ while (*pat_ptr) { + /* + * Split on spaces before doing template expansion so that + * %e and %E don't get split if they have spaces in them + */ + if (ispipe) { + if (isspace(*pat_ptr)) { + was_space = true; + pat_ptr++; + continue; + } else if (was_space) { + was_space = false; + err = cn_printf(cn, "%c", '\0'); + if (err) + return err; + (*argv)[(*argc)++] = cn->used; + } + } if (*pat_ptr != '%') { err = cn_printf(cn, "%c", *pat_ptr++); } else { @@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo) struct cred *cred; int retval = 0; int ispipe; + size_t *argv = NULL; + int argc = 0; struct files_struct *displaced; /* require nonrelative corefile path and be extra careful */ bool need_suid_safe = false; @@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo) old_cred = override_creds(cred); - ispipe = format_corename(&cn, &cprm); + ispipe = format_corename(&cn, &cprm, &argv, &argc); if (ispipe) { + int argi; int dump_count; char **helper_argv; struct subprocess_info *sub_info; @@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo) goto fail_dropcount; } - helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL); + helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), + GFP_KERNEL); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); goto fail_dropcount; } + for (argi = 0; argi < argc; argi++) + helper_argv[argi] = cn.corename + argv[argi]; + helper_argv[argi] = NULL; retval = -ENOMEM; sub_info = call_usermodehelper_setup(helper_argv[0], @@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) retval = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); - argv_free(helper_argv); + kfree(helper_argv); if (retval) { printk(KERN_INFO "Core dump to |%s pipe failed\n", cn.corename); @@ -766,6 +799,7 @@ fail_dropcount: if (ispipe) atomic_dec(&core_dump_count); fail_unlock: + kfree(argv); kfree(cn.corename); coredump_finish(mm, core_dumped); revert_creds(old_cred); @@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) static void put_unlocked_entry(struct xa_state *xas, void *entry) { /* If we were the only waiter woken, wake the next one */ - if (entry && dax_is_conflict(entry)) + if (entry && !dax_is_conflict(entry)) dax_wake_entry(xas, entry, false); } diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f8d46df8fa9e..3e58a6f697dd 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) { struct f2fs_inode_info *fi = F2FS_I(inode); - u32 oldflags; /* Is it quota file? 
Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; - oldflags = fi->i_flags; - - if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) - if (!capable(CAP_LINUX_IMMUTABLE)) - return -EPERM; - - fi->i_flags = iflags | (oldflags & ~mask); + fi->i_flags = iflags | (fi->i_flags & ~mask); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); @@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - u32 fsflags; + struct f2fs_inode_info *fi = F2FS_I(inode); + u32 fsflags, old_fsflags; u32 iflags; int ret; @@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) inode_lock(inode); + old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); + if (ret) + goto out; + ret = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); +out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags) return iflags; } -static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) +static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) { - struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); - struct fsxattr fa; - memset(&fa, 0, sizeof(struct fsxattr)); - fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags); + simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) - fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, - fi->i_projid); - - if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) - return -EFAULT; - return 0; + fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); } -static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa) +static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) { - /* - * Project Quota ID state is only allowed to change from within the init - * namespace. Enforce that restriction only if we are trying to change - * the quota ID state. Everything else is allowed in user namespaces. 
- */ - if (current_user_ns() == &init_user_ns) - return 0; + struct inode *inode = file_inode(filp); + struct fsxattr fa; - if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid) - return -EINVAL; - - if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) { - if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT)) - return -EINVAL; - } else { - if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) - return -EINVAL; - } + f2fs_fill_fsxattr(inode, &fa); + if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) + return -EFAULT; return 0; } static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - struct fsxattr fa; + struct fsxattr fa, old_fa; u32 iflags; int err; @@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) return err; inode_lock(inode); - err = f2fs_ioctl_check_project(inode, &fa); + + f2fs_fill_fsxattr(inode, &old_fa); + err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); if (err) goto out; + err = f2fs_setflags_common(inode, iflags, f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); if (err) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6691f526fa40..8974672db78f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx, if (lfs_mode) down_write(&fio.sbi->io_order_lock); + mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), + fio.old_blkaddr, false); + if (!mpage) + goto up_out; + + fio.encrypted_page = mpage; + + /* read source block in mpage */ + if (!PageUptodate(mpage)) { + err = f2fs_submit_page_bio(&fio); + if (err) { + f2fs_put_page(mpage, 1); + goto up_out; + } + lock_page(mpage); + if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || + !PageUptodate(mpage))) { + err = -EIO; + f2fs_put_page(mpage, 1); + goto up_out; + } + } + f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); @@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx, newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - goto recover_block; - } - - mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), - fio.old_blkaddr, FGP_LOCK, GFP_NOFS); - if (mpage) { - bool updated = false; - - if (PageUptodate(mpage)) { - memcpy(page_address(fio.encrypted_page), - page_address(mpage), PAGE_SIZE); - updated = true; - } f2fs_put_page(mpage, 1); - invalidate_mapping_pages(META_MAPPING(fio.sbi), - fio.old_blkaddr, fio.old_blkaddr); - if (updated) - goto write_page; - } - - err = f2fs_submit_page_bio(&fio); - if (err) - goto put_page_out; - - /* write page */ - lock_page(fio.encrypted_page); - - if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { - err = -EIO; - goto put_page_out; - } - if (unlikely(!PageUptodate(fio.encrypted_page))) { - err = -EIO; - goto put_page_out; + goto recover_block; } -write_page: + /* write target block */ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); + memcpy(page_address(fio.encrypted_page), + page_address(mpage), PAGE_SIZE); + f2fs_put_page(mpage, 1); + invalidate_mapping_pages(META_MAPPING(fio.sbi), + fio.old_blkaddr, fio.old_blkaddr); + set_page_dirty(fio.encrypted_page); if (clear_page_dirty_for_io(fio.encrypted_page)) dec_page_count(fio.sbi, F2FS_DIRTY_META); @@ -871,11 +868,12 @@ write_page: put_page_out: f2fs_put_page(fio.encrypted_page, 1); recover_block: - if (lfs_mode) - up_write(&fio.sbi->io_order_lock); if (err) f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, true, 
true); +up_out: + if (lfs_mode) + up_write(&fio.sbi->io_order_lock); put_out: f2fs_put_dnode(&dn); out: diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 6de6cda44031..78a1b873e48a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, size_t crc_offset = 0; __u32 crc = 0; + if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) { + f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); + return -EINVAL; + } + /* Check checksum_offset and crc in superblock */ if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) { crc_offset = le32_to_cpu(raw_super->checksum_offset); @@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, offsetof(struct f2fs_super_block, crc)) { f2fs_info(sbi, "Invalid SB checksum offset: %zu", crc_offset); - return 1; + return -EFSCORRUPTED; } crc = le32_to_cpu(raw_super->crc); if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { f2fs_info(sbi, "Invalid SB checksum value: %u", crc); - return 1; + return -EFSCORRUPTED; } } - if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { - f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", - F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); - return 1; - } - /* Currently, support only 4KB page cache size */ if (F2FS_BLKSIZE != PAGE_SIZE) { f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", PAGE_SIZE); - return 1; + return -EFSCORRUPTED; } /* Currently, support only 4KB block size */ @@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (blocksize != F2FS_BLKSIZE) { f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", blocksize); - return 1; + return -EFSCORRUPTED; } /* check log blocks per segment */ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { f2fs_info(sbi, "Invalid log blocks per segment (%u)", le32_to_cpu(raw_super->log_blocks_per_seg)); - return 1; + return -EFSCORRUPTED; } /* Currently, support 512/1024/2048/4096 bytes sector size */ @@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, F2FS_MIN_LOG_SECTOR_SIZE) { f2fs_info(sbi, "Invalid log sectorsize (%u)", le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->log_sectors_per_block) + le32_to_cpu(raw_super->log_sectorsize) != @@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", le32_to_cpu(raw_super->log_sectors_per_block), le32_to_cpu(raw_super->log_sectorsize)); - return 1; + return -EFSCORRUPTED; } segment_count = le32_to_cpu(raw_super->segment_count); @@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (segment_count > F2FS_MAX_SEGMENT || segment_count < F2FS_MIN_SEGMENTS) { f2fs_info(sbi, "Invalid segment count (%u)", segment_count); - return 1; + return -EFSCORRUPTED; } if (total_sections > segment_count || @@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, segs_per_sec > segment_count || !segs_per_sec) { f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", segment_count, total_sections, segs_per_sec); - return 1; + return -EFSCORRUPTED; } if ((segment_count / segs_per_sec) < total_sections) { f2fs_info(sbi, "Small segment_count (%u < %u * %u)", segment_count, segs_per_sec, total_sections); - return 1; + return -EFSCORRUPTED; } if (segment_count > 
(le64_to_cpu(raw_super->block_count) >> 9)) { f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", segment_count, le64_to_cpu(raw_super->block_count)); - return 1; + return -EFSCORRUPTED; } if (secs_per_zone > total_sections || !secs_per_zone) { f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", secs_per_zone, total_sections); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || raw_super->hot_ext_count > F2FS_MAX_EXTENSION || @@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->extension_count), raw_super->hot_ext_count, F2FS_MAX_EXTENSION); - return 1; + return -EFSCORRUPTED; } if (le32_to_cpu(raw_super->cp_payload) > @@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, f2fs_info(sbi, "Insane cp_payload (%u > %u)", le32_to_cpu(raw_super->cp_payload), blocks_per_seg - F2FS_CP_PACKS); - return 1; + return -EFSCORRUPTED; } /* check reserved ino info */ @@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, le32_to_cpu(raw_super->node_ino), le32_to_cpu(raw_super->meta_ino), le32_to_cpu(raw_super->root_ino)); - return 1; + return -EFSCORRUPTED; } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ if (sanity_check_area_boundary(sbi, bh)) - return 1; + return -EFSCORRUPTED; return 0; } @@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi, } /* sanity checking of raw super */ - if (sanity_check_raw_super(sbi, bh)) { + err = sanity_check_raw_super(sbi, bh); + if (err) { f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", block + 1); - err = -EFSCORRUPTED; brelse(bh); continue; } diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 79581b9bdebb..4df26ef2b2b1 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos, unsigned copied, struct page *page, struct iomap *iomap) { + struct gfs2_trans *tr = current->journal_info; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); if (page && !gfs2_is_stuffed(ip)) gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); + + if (tr->tr_num_buf_new) + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + gfs2_trans_end(sdp); } @@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, tr = current->journal_info; if (tr->tr_num_buf_new) __mark_inode_dirty(inode, I_DIRTY_DATASYNC); - else - gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]); gfs2_trans_end(sdp); } @@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, if (ip->i_qadata && ip->i_qadata->qa_qd_num) gfs2_quota_unlock(ip); + + if (unlikely(!written)) + goto out_unlock; + if (iomap->flags & IOMAP_F_SIZE_CHANGED) mark_inode_dirty(inode); - gfs2_write_unlock(inode); + set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); +out_unlock: + gfs2_write_unlock(inode); out: return 0; } diff --git a/fs/io_uring.c b/fs/io_uring.c index 012bc0efb9d3..d542f1cf4428 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1838,6 +1838,7 @@ restart: do { struct sqe_submit *s = &req->submit; const struct io_uring_sqe *sqe = s->sqe; + unsigned int flags = req->flags; /* Ensure we clear previously set non-block flag */ req->rw.ki_flags &= ~IOCB_NOWAIT; @@ -1883,7 +1884,7 @@ restart: kfree(sqe); /* req from defer and link list needn't decrease async cnt */ - if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) + if (flags & (REQ_F_IO_DRAINED 
| REQ_F_LINK_DONE)) goto out; if (!async_list) diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 385f3aaa2448..90c830e3758e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int low_bucket = 0, bucket, high_bucket; struct ocfs2_xattr_bucket *search; - u32 last_hash; u64 blkno, lower_blkno = 0; search = ocfs2_xattr_bucket_new(inode); @@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, if (xh->xh_count) xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; - last_hash = le32_to_cpu(xe->xe_name_hash); - /* record lower_blkno which may be the insert place. */ lower_blkno = blkno; diff --git a/fs/super.c b/fs/super.c index 113c58f19425..5960578a4076 100644 --- a/fs/super.c +++ b/fs/super.c @@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super); bool mount_capable(struct fs_context *fc) { - struct user_namespace *user_ns = fc->global ? &init_user_ns - : fc->user_ns; - if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) return capable(CAP_SYS_ADMIN); else - return ns_capable(user_ns, CAP_SYS_ADMIN); + return ns_capable(fc->user_ns, CAP_SYS_ADMIN); } /** diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index 94c4f1de1922..77ff9f97bcda 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c @@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling( /* Compare upper level pointer to sibling pointer. */ if (ds->state->altpath.blk[level].blkno != sibling) xchk_da_set_corrupt(ds, level); - xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp); + if (ds->state->altpath.blk[level].bp) { + xfs_trans_brelse(ds->dargs.trans, + ds->state->altpath.blk[level].bp); + ds->state->altpath.blk[level].bp = NULL; + } out: return error; } diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index a8a06bb78ea8..f5c955d35be4 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat( struct xfs_bstat *bs1, const struct xfs_bulkstat *bstat) { + /* memset is needed here because of padding holes in the structure. */ memset(bs1, 0, sizeof(struct xfs_bstat)); bs1->bs_ino = bstat->bs_ino; bs1->bs_mode = bstat->bs_mode; @@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp( struct xfs_inogrp *ig1, const struct xfs_inumbers *ig) { + /* memset is needed here because of padding holes in the structure. */ + memset(ig1, 0, sizeof(struct xfs_inogrp)); ig1->xi_startino = ig->xi_startino; ig1->xi_alloccount = ig->xi_alloccount; ig1->xi_allocmask = ig->xi_allocmask; |
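
For reference, a minimal sketch of how the btrfs_join_transaction_nostart() helper introduced above is meant to be called, modelled on the btrfs_check_shared() change in fs/btrfs/backref.c; the actual sharedness-check logic is elided and the function name is illustrative only:

static int example_check_with_trans(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* Join a running transaction, but never start a new one. */
	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);
		/* -ENOENT: nothing running, fall back to the commit roots. */
		trans = NULL;
	}

	/* ... perform the backref walk / sharedness check here ... */

	if (trans)
		btrfs_end_transaction(trans);
	return 0;
}
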