Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/ext4.h       |  1
-rw-r--r--   fs/ext4/ext4_jbd2.c  |  2
-rw-r--r--   fs/ext4/file.c       | 56
-rw-r--r--   fs/ext4/fsync.c      |  2
-rw-r--r--   fs/ext4/ialloc.c     |  2
-rw-r--r--   fs/ext4/inode.c      | 26
-rw-r--r--   fs/ext4/mmp.c        |  2
-rw-r--r--   fs/ext4/page-io.c    |  4
-rw-r--r--   fs/ext4/readpage.c   |  2
-rw-r--r--   fs/ext4/super.c      | 83
10 files changed, 67 insertions, 113 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 84b9da192238..e2abe01c8c6b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1526,6 +1526,7 @@ struct ext4_sb_info {
 
 	/* Barrier between changing inodes' journal flags and writepages ops. */
 	struct percpu_rw_semaphore s_journal_flag_rwsem;
+	struct dax_device *s_daxdev;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index dd106b1d5d89..5b342ac67d2e 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -47,7 +47,7 @@ static int ext4_journal_check_start(struct super_block *sb)
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
 		return -EIO;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return -EROFS;
 	WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
 	journal = EXT4_SB(sb)->s_journal;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 197653ea6041..b1da660ac3bc 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -223,6 +223,8 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (IS_DAX(inode))
 		return ext4_dax_write_iter(iocb, from);
 #endif
+	if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
+		return -EOPNOTSUPP;
 
 	if (!inode_trylock(inode)) {
 		if (iocb->ki_flags & IOCB_NOWAIT)
@@ -324,41 +326,11 @@ static int ext4_dax_fault(struct vm_fault *vmf)
 	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
-/*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
- * handler we check for races agaist truncate. Note that since we cycle through
- * i_mmap_sem, we are sure that also any hole punching that began before we
- * were called is finished by now and so if it included part of the file we
- * are working on, our pte will get unmapped and the check for pte_same() in
- * wp_pfn_shared() fails. Thus fault gets retried and things work out as
- * desired.
- */
-static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
-{
-	struct inode *inode = file_inode(vmf->vma->vm_file);
-	struct super_block *sb = inode->i_sb;
-	loff_t size;
-	int ret;
-
-	sb_start_pagefault(sb);
-	file_update_time(vmf->vma->vm_file);
-	down_read(&EXT4_I(inode)->i_mmap_sem);
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (vmf->pgoff >= size)
-		ret = VM_FAULT_SIGBUS;
-	else
-		ret = dax_pfn_mkwrite(vmf);
-	up_read(&EXT4_I(inode)->i_mmap_sem);
-	sb_end_pagefault(sb);
-
-	return ret;
-}
-
 static const struct vm_operations_struct ext4_dax_vm_ops = {
 	.fault		= ext4_dax_fault,
 	.huge_fault	= ext4_dax_huge_fault,
 	.page_mkwrite	= ext4_dax_fault,
-	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
+	.pfn_mkwrite	= ext4_dax_fault,
 };
 #else
 #define ext4_dax_vm_ops	ext4_file_vm_ops
@@ -401,7 +373,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 		return -EIO;
 
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
-		     !(sb->s_flags & MS_RDONLY))) {
+		     !sb_rdonly(sb))) {
 		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
 		/*
 		 * Sample where the filesystem has been mounted and
@@ -461,9 +433,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 			return ret;
 	}
 
-	/* Set the flags to support nowait AIO */
-	filp->f_mode |= FMODE_AIO_NOWAIT;
-
+	filp->f_mode |= FMODE_NOWAIT;
 	return dquot_file_open(inode, filp);
 }
 
@@ -507,12 +477,11 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 
 	pagevec_init(&pvec, 0);
 	do {
-		int i, num;
+		int i;
 		unsigned long nr_pages;
 
-		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
-					  (pgoff_t)num);
+		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
+						&index, end);
 		if (nr_pages == 0)
 			break;
 
@@ -531,9 +500,6 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 				goto out;
 			}
 
-			if (page->index > end)
-				goto out;
-
 			lock_page(page);
 
 			if (unlikely(page->mapping != inode->i_mapping)) {
@@ -576,14 +542,10 @@ next:
 			unlock_page(page);
 		}
 
-		/* The no. of pages is less than our desired, we are done. */
-		if (nr_pages < num)
-			break;
-
-		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	/* There are no pages upto endoff - that would be a hole in there. */
 	if (whence == SEEK_HOLE && lastoff < endoff) {
 		found = 1;
 		*offset = lastoff;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index aae2c3971cef..f9230580a84b 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -107,7 +107,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
 	trace_ext4_sync_file_enter(file, datasync);
 
-	if (inode->i_sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(inode->i_sb)) {
 		/* Make sure that we read updated s_mount_flags value */
 		smp_rmb();
 		if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 71e93a23cec3..ee823022aa34 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1382,7 +1382,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 	int num, ret = 0, used_blks = 0;
 
 	/* This should not happen, but just to be sure check this */
-	if (sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(sb)) {
 		ret = 1;
 		goto out;
 	}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 714396760616..31db875bc7a1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1720,13 +1720,12 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 
 	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
-			if (page->index > end)
-				break;
+
 			BUG_ON(!PageLocked(page));
 			BUG_ON(PageWriteback(page));
 			if (invalidate) {
@@ -1737,7 +1736,6 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 			}
 			unlock_page(page);
 		}
-		index = pvec.pages[nr_pages - 1]->index + 1;
 		pagevec_release(&pvec);
 	}
 }
@@ -2348,17 +2346,13 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 
 	pagevec_init(&pvec, 0);
 	while (start <= end) {
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
-					  PAGEVEC_SIZE);
+		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
+						&start, end);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			if (page->index > end)
-				break;
-			/* Up to 'end' pages must be contiguous */
-			BUG_ON(page->index != start);
 			bh = head = page_buffers(page);
 			do {
 				if (lblk < mpd->map.m_lblk)
@@ -2403,7 +2397,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 				pagevec_release(&pvec);
 				return err;
 			}
-			start++;
 		}
 		pagevec_release(&pvec);
 	}
@@ -3404,7 +3397,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 			    unsigned flags, struct iomap *iomap)
 {
-	struct block_device *bdev;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	unsigned int blkbits = inode->i_blkbits;
 	unsigned long first_block = offset >> blkbits;
 	unsigned long last_block = (offset + length - 1) >> blkbits;
@@ -3473,12 +3466,8 @@ retry:
 	}
 
 	iomap->flags = 0;
-	bdev = inode->i_sb->s_bdev;
-	iomap->bdev = bdev;
-	if (blk_queue_dax(bdev->bd_queue))
-		iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
-	else
-		iomap->dax_dev = NULL;
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->dax_dev = sbi->s_daxdev;
 	iomap->offset = first_block << blkbits;
 
 	if (ret == 0) {
@@ -3511,7 +3500,6 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 	int blkbits = inode->i_blkbits;
 	bool truncate = false;
 
-	fs_put_dax(iomap->dax_dev);
 	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
 		return 0;
 
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 77cdce1f17ce..84c54f15f1dd 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -185,7 +185,7 @@ static int kmmpd(void *data)
 			goto exit_thread;
 		}
 
-		if (sb->s_flags & MS_RDONLY) {
+		if (sb_rdonly(sb)) {
 			ext4_warning(sb, "kmmpd being stopped since filesystem "
 				     "has been remounted as readonly.");
 			goto exit_thread;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index c2fce4478cca..55ad7dd149d0 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -300,7 +300,7 @@ static void ext4_end_bio(struct bio *bio)
 	char b[BDEVNAME_SIZE];
 
 	if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
-		      bdevname(bio->bi_bdev, b),
+		      bio_devname(bio, b),
 		      (long long) bio->bi_iter.bi_sector,
 		      (unsigned) bio_sectors(bio),
 		      bio->bi_status)) {
@@ -375,7 +375,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
 		return -ENOMEM;
 	wbc_init_bio(io->io_wbc, bio);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio->bi_bdev = bh->b_bdev;
+	bio_set_dev(bio, bh->b_bdev);
 	bio->bi_end_io = ext4_end_bio;
 	bio->bi_private = ext4_get_io_end(io->io_end);
 	io->io_bio = bio;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 40a5497b0f60..04c90643af7a 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -254,7 +254,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 				fscrypt_release_ctx(ctx);
 				goto set_error_page;
 			}
-			bio->bi_bdev = bdev;
+			bio_set_dev(bio, bdev);
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
 			bio->bi_private = ctx;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c9e7be58756b..b104096fce9e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -405,7 +405,7 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 
 static void ext4_handle_error(struct super_block *sb)
 {
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return;
 
 	if (!test_opt(sb, ERRORS_CONT)) {
@@ -587,8 +587,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
 	/* Special case: if the error is EROFS, and we're not already
 	 * inside a transaction, then there's really no point in logging
 	 * an error. */
-	if (errno == -EROFS && journal_current_handle() == NULL &&
-	    (sb->s_flags & MS_RDONLY))
+	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
 		return;
 
 	if (ext4_error_ratelimit(sb)) {
@@ -628,7 +627,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
 	       sb->s_id, function, line, &vaf);
 	va_end(args);
 
-	if ((sb->s_flags & MS_RDONLY) == 0) {
+	if (sb_rdonly(sb) == 0) {
 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
 		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
 		/*
@@ -889,11 +888,11 @@ static void ext4_put_super(struct super_block *sb)
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
 
-	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
+	if (!sb_rdonly(sb) && !aborted) {
 		ext4_clear_feature_journal_needs_recovery(sb);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
 	}
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!sb_rdonly(sb))
 		ext4_commit_super(sb, 1);
 
 	for (i = 0; i < sbi->s_gdb_count; i++)
@@ -951,6 +950,7 @@ static void ext4_put_super(struct super_block *sb)
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
 	kfree(sbi->s_blockgroup_lock);
+	fs_put_dax(sbi->s_daxdev);
 	kfree(sbi);
 }
 
@@ -2099,7 +2099,7 @@ int ext4_seq_options_show(struct seq_file *seq, void *offset)
 	struct super_block *sb = seq->private;
 	int rc;
 
-	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
+	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
 	rc = _ext4_show_options(seq, sb, 1);
 	seq_puts(seq, "\n");
 	return rc;
 }
@@ -2367,7 +2367,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 				 "Checksum for group %u failed (%u!=%u)",
 				 i, le16_to_cpu(ext4_group_desc_csum(sb, i, gdp)),
 				 le16_to_cpu(gdp->bg_checksum));
-			if (!(sb->s_flags & MS_RDONLY)) {
+			if (!sb_rdonly(sb)) {
 				ext4_unlock_group(sb, i);
 				return 0;
 			}
@@ -3135,8 +3135,7 @@ int ext4_register_li_request(struct super_block *sb,
 		goto out;
 	}
 
-	if (first_not_zeroed == ngroups ||
-	    (sb->s_flags & MS_RDONLY) ||
+	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
 	    !test_opt(sb, INIT_INODE_TABLE))
 		goto out;
 
@@ -3398,6 +3397,7 @@ static void ext4_set_resv_clusters(struct super_block *sb)
 
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
+	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 	char *orig_data = kstrdup(data, GFP_KERNEL);
 	struct buffer_head *bh;
 	struct ext4_super_block *es = NULL;
@@ -3423,6 +3423,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if ((data && !orig_data) || !sbi)
 		goto out_free_base;
 
+	sbi->s_daxdev = dax_dev;
 	sbi->s_blockgroup_lock =
 		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
 	if (!sbi->s_blockgroup_lock)
 		goto out_free_base;
@@ -3680,7 +3681,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	 * previously didn't change the revision level when setting the flags,
 	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
-	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
+	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
 		goto failed_mount;
 
 	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
@@ -3809,12 +3810,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		sbi->s_hash_unsigned = 3;
 	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
 #ifdef __CHAR_UNSIGNED__
-		if (!(sb->s_flags & MS_RDONLY))
+		if (!sb_rdonly(sb))
 			es->s_flags |=
 				cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
 		sbi->s_hash_unsigned = 3;
 #else
-		if (!(sb->s_flags & MS_RDONLY))
+		if (!sb_rdonly(sb))
 			es->s_flags |=
 				cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
 #endif
@@ -4014,7 +4015,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	needs_recovery = (es->s_last_orphan != 0 ||
 			  ext4_has_feature_journal_needs_recovery(sb));
 
-	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
+	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
 			goto failed_mount3a;
 
@@ -4026,7 +4027,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		err = ext4_load_journal(sb, es, journal_devnum);
 		if (err)
 			goto failed_mount3a;
-	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
+	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
 		   ext4_has_feature_journal_needs_recovery(sb)) {
 		ext4_msg(sb, KERN_ERR, "required journal recovery "
 		       "suppressed and not mounted read-only");
@@ -4140,7 +4141,7 @@ no_journal:
 		goto failed_mount_wq;
 	}
 
-	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
+	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
 	    !ext4_has_feature_encrypt(sb)) {
 		ext4_set_feature_encrypt(sb);
 		ext4_commit_super(sb, 1);
@@ -4194,7 +4195,7 @@ no_journal:
 		goto failed_mount4;
 	}
 
-	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+	if (ext4_setup_super(sb, es, sb_rdonly(sb)))
 		sb->s_flags |= MS_RDONLY;
 
 	/* determine the minimum size of new large inodes, if present */
@@ -4282,7 +4283,7 @@ no_journal:
 
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount. */
-	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
+	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
 		err = ext4_enable_quotas(sb);
 		if (err)
 			goto failed_mount8;
@@ -4399,6 +4400,7 @@ out_fail:
 out_free_base:
 	kfree(sbi);
 	kfree(orig_data);
+	fs_put_dax(dax_dev);
 	return err ? err : ret;
 }
 
@@ -4605,7 +4607,7 @@ static int ext4_load_journal(struct super_block *sb,
 	 * can get read-write access to the device.
 	 */
 	if (ext4_has_feature_journal_needs_recovery(sb)) {
-		if (sb->s_flags & MS_RDONLY) {
+		if (sb_rdonly(sb)) {
 			ext4_msg(sb, KERN_INFO, "INFO: recovery "
 					"required on readonly filesystem");
 			if (really_read_only) {
@@ -4760,8 +4762,7 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
 	if (jbd2_journal_flush(journal) < 0)
 		goto out;
 
-	if (ext4_has_feature_journal_needs_recovery(sb) &&
-	    sb->s_flags & MS_RDONLY) {
+	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
 		ext4_clear_feature_journal_needs_recovery(sb);
 		ext4_commit_super(sb, 1);
 	}
@@ -4817,7 +4818,7 @@ int ext4_force_commit(struct super_block *sb)
 {
 	journal_t *journal;
 
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 
 	journal = EXT4_SB(sb)->s_journal;
@@ -4882,7 +4883,7 @@ static int ext4_freeze(struct super_block *sb)
 	int error = 0;
 	journal_t *journal;
 
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 
 	journal = EXT4_SB(sb)->s_journal;
@@ -4917,7 +4918,7 @@ out:
  */
 static int ext4_unfreeze(struct super_block *sb)
 {
-	if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb)))
+	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
 		return 0;
 
 	if (EXT4_SB(sb)->s_journal) {
@@ -5055,7 +5056,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	if (*flags & MS_LAZYTIME)
 		sb->s_flags |= MS_LAZYTIME;
 
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+	if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
 		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
 			err = -EROFS;
 			goto restore_opts;
 		}
@@ -5150,7 +5151,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	 * Reinitialize lazy itable initialization thread based on
 	 * current settings
	 */
-	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
+	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
 		ext4_unregister_li_request(sb);
 	else {
 		ext4_group_t first_not_zeroed;
@@ -5215,7 +5216,7 @@ static int ext4_statfs_project(struct super_block *sb,
 	dquot = dqget(sb, qid);
 	if (IS_ERR(dquot))
 		return PTR_ERR(dquot);
-	spin_lock(&dq_data_lock);
+	spin_lock(&dquot->dq_dqb_lock);
 
 	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
 		 dquot->dq_dqb.dqb_bsoftlimit :
@@ -5238,7 +5239,7 @@ static int ext4_statfs_project(struct super_block *sb,
 			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
 	}
 
-	spin_unlock(&dq_data_lock);
+	spin_unlock(&dquot->dq_dqb_lock);
 	dqput(dquot);
 	return 0;
 }
@@ -5284,18 +5285,13 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }
 
-/* Helper function for writing quotas on sync - we need to start transaction
- * before quota file is locked for write. Otherwise the are possible deadlocks:
- * Process 1                         Process 2
- * ext4_create()                     quota_sync()
- *   jbd2_journal_start()              write_dquot()
- *   dquot_initialize()                down(dqio_mutex)
- *     down(dqio_mutex)                  jbd2_journal_start()
- *
- */
 #ifdef CONFIG_QUOTA
+/*
+ * Helper functions so that transaction is started before we acquire dqio_sem
+ * to keep correct lock ordering of transaction > dqio_sem
+ */
 static inline struct inode *dquot_to_inode(struct dquot *dquot)
 {
 	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
 }
 
@@ -5430,6 +5426,13 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 		ext4_msg(sb, KERN_WARNING,
 			"Quota file not on filesystem root. "
 			"Journaled quota will not work");
+		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
+	} else {
+		/*
+		 * Clear the flag just in case mount options changed since
+		 * last time.
+		 */
+		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
 	}
 
 	/*
@@ -5526,7 +5529,7 @@ static int ext4_enable_quotas(struct super_block *sb)
 		test_opt(sb, PRJQUOTA),
 	};
 
-	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
 	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
 		if (qf_inums[type]) {
 			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
@@ -5725,7 +5728,7 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
 {
 	if (ext4_has_unknown_ext2_incompat_features(sb))
 		return 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 1;
 	if (ext4_has_unknown_ext2_ro_compat_features(sb))
 		return 0;
@@ -5756,7 +5759,7 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
 		return 0;
 	if (!ext4_has_feature_journal(sb))
 		return 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 1;
 	if (ext4_has_unknown_ext3_ro_compat_features(sb))
 		return 0;