| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-21 23:26:33 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-21 23:26:33 +0200 |
| commit | fbc246a12aac27f7b25a37f9398bb3bc552cec92 (patch) | |
| tree | 4b28f91eeda2c6d7d4db86e7c97fca2d305b6706 /fs/f2fs/data.c | |
| parent | Merge tag 'for_v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jac... (diff) | |
| parent | f2fs: add a condition to detect overflow in f2fs_ioc_gc_range() (diff) | |
Merge tag 'f2fs-for-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
"In this round, we introduced casefolding support in f2fs, and fixed
various bugs in individual features such as IO alignment,
checkpoint=disable, quota, and swapfile.
Enhancement:
- support casefolding, with a matching enhancement in ext4
- support fiemap for directory
- support the FS_IOC_GETFSLABEL / FS_IOC_SETFSLABEL ioctls (usage sketch below)
Bug fix:
- fix IO stuck during checkpoint=disable
- avoid infinite GC loop
- fix panic/overflow related to IO alignment feature
- fix livelock in swap file
- fix discard command leak
- disallow dio for atomic_write"
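The label ioctls noted above are the generic FS_IOC_GETFSLABEL / FS_IOC_SETFSLABEL interface from `<linux/fs.h>`, which f2fs implements as of this merge. Below is a hypothetical userspace sketch (not part of this merge; error handling kept minimal) of how a label would be read and set through them:

```c
/*
 * Hypothetical userspace sketch (not from this merge): exercise the generic
 * FS_IOC_GETFSLABEL / FS_IOC_SETFSLABEL ioctls that f2fs now implements.
 * Usage: ./fslabel <path-on-f2fs> [new-label]
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_*FSLABEL, FSLABEL_MAX */

int main(int argc, char **argv)
{
	char label[FSLABEL_MAX] = { 0 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-on-f2fs> [new-label]\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);	/* any file or directory on the fs */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("current label: %s\n", label);
	else
		perror("FS_IOC_GETFSLABEL");

	if (argc > 2) {			/* optionally set a new label */
		memset(label, 0, sizeof(label));
		strncpy(label, argv[2], FSLABEL_MAX - 1);
		if (ioctl(fd, FS_IOC_SETFSLABEL, label))
			perror("FS_IOC_SETFSLABEL");
	}

	close(fd);
	return 0;
}
```

Any descriptor opened on the mounted filesystem (for example its mount point) can be used; setting the label typically requires CAP_SYS_ADMIN.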
* tag 'f2fs-for-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (51 commits)
f2fs: add a condition to detect overflow in f2fs_ioc_gc_range()
f2fs: fix to add missing F2FS_IO_ALIGNED() condition
f2fs: fix to fallback to buffered IO in IO aligned mode
f2fs: fix to handle error path correctly in f2fs_map_blocks
f2fs: fix extent corruption during directIO in LFS mode
f2fs: check all the data segments against all node ones
f2fs: Add a small clarification to CONFIG_F2FS_FS_SECURITY
f2fs: fix inode rwsem regression
f2fs: fix to avoid accessing uninitialized field of inode page in is_alive()
f2fs: avoid infinite GC loop due to stale atomic files
f2fs: Fix indefinite loop in f2fs_gc()
f2fs: convert inline_data in prior to i_size_write
f2fs: fix error path of f2fs_convert_inline_page()
f2fs: add missing documents of reserve_root/resuid/resgid
f2fs: fix flushing node pages when checkpoint is disabled
f2fs: enhance f2fs_is_checkpoint_ready()'s readability
f2fs: clean up __bio_alloc()'s parameter
f2fs: fix wrong error injection path in inc_valid_block_count()
f2fs: fix to writeout dirty inode during node flush
f2fs: optimize case-insensitive lookups
...
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r-- | fs/f2fs/data.c | 104 |
1 file changed, 70 insertions(+), 34 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 54cad80acb7d..5755e897a5f0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -283,26 +283,25 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
 /*
  * Low-level block read/write IO operations.
  */
-static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
-				struct writeback_control *wbc,
-				int npages, bool is_read,
-				enum page_type type, enum temp_type temp)
+static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 {
+	struct f2fs_sb_info *sbi = fio->sbi;
 	struct bio *bio;
 
 	bio = f2fs_bio_alloc(sbi, npages, true);
 
-	f2fs_target_device(sbi, blk_addr, bio);
-	if (is_read) {
+	f2fs_target_device(sbi, fio->new_blkaddr, bio);
+	if (is_read_io(fio->op)) {
 		bio->bi_end_io = f2fs_read_end_io;
 		bio->bi_private = NULL;
 	} else {
 		bio->bi_end_io = f2fs_write_end_io;
 		bio->bi_private = sbi;
-		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
+		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
+						fio->type, fio->temp);
 	}
-	if (wbc)
-		wbc_init_bio(wbc, bio);
+	if (fio->io_wbc)
+		wbc_init_bio(fio->io_wbc, bio);
 
 	return bio;
 }
@@ -319,6 +318,9 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
 		if (test_opt(sbi, LFS) && current->plug)
 			blk_finish_plug(current->plug);
 
+		if (F2FS_IO_ALIGNED(sbi))
+			goto submit_io;
+
 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
 		start %= F2FS_IO_SIZE(sbi);
 
@@ -485,8 +487,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	f2fs_trace_ios(fio, 0);
 
 	/* Allocate a new bio */
-	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
-				1, is_read_io(fio->op), fio->type, fio->temp);
+	bio = __bio_alloc(fio, 1);
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
@@ -505,6 +506,43 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	return 0;
 }
 
+static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
+				block_t last_blkaddr, block_t cur_blkaddr)
+{
+	if (last_blkaddr + 1 != cur_blkaddr)
+		return false;
+	return __same_bdev(sbi, cur_blkaddr, bio);
+}
+
+static bool io_type_is_mergeable(struct f2fs_bio_info *io,
+						struct f2fs_io_info *fio)
+{
+	if (io->fio.op != fio->op)
+		return false;
+	return io->fio.op_flags == fio->op_flags;
+}
+
+static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
+					struct f2fs_bio_info *io,
+					struct f2fs_io_info *fio,
+					block_t last_blkaddr,
+					block_t cur_blkaddr)
+{
+	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
+		unsigned int filled_blocks =
+				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
+		unsigned int io_size = F2FS_IO_SIZE(sbi);
+		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
+
+		/* IOs in bio is aligned and left space of vectors is not enough */
+		if (!(filled_blocks % io_size) && left_vecs < io_size)
+			return false;
+	}
+	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
+		return false;
+	return io_type_is_mergeable(io, fio);
+}
+
 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 {
 	struct bio *bio = *fio->bio;
@@ -518,15 +556,14 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
 
-	if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
-			!__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
+	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
+						fio->new_blkaddr)) {
 		__submit_bio(fio->sbi, bio, fio->type);
 		bio = NULL;
 	}
 alloc_new:
 	if (!bio) {
-		bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
-				BIO_MAX_PAGES, false, fio->type, fio->temp);
+		bio = __bio_alloc(fio, BIO_MAX_PAGES);
 		bio_set_op_attrs(bio, fio->op, fio->op_flags);
 	}
 
@@ -592,21 +629,19 @@ next:
 
 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
 
-	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
-	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
-			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
+	if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
+			io->last_block_in_bio, fio->new_blkaddr))
 		__submit_merged_bio(io);
 alloc_new:
 	if (io->bio == NULL) {
-		if ((fio->type == DATA || fio->type == NODE) &&
+		if (F2FS_IO_ALIGNED(sbi) &&
+				(fio->type == DATA || fio->type == NODE) &&
 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
 			fio->retry = true;
 			goto skip;
 		}
-		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
-						BIO_MAX_PAGES, false,
-						fio->type, fio->temp);
+		io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
 		io->fio = *fio;
 	}
 
@@ -627,7 +662,7 @@ skip:
 		goto next;
 out:
 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
-				f2fs_is_checkpoint_ready(sbi))
+				!f2fs_is_checkpoint_ready(sbi))
 		__submit_merged_bio(io);
 	up_write(&io->io_rwsem);
 }
@@ -1022,7 +1057,7 @@ alloc:
 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
 		invalidate_mapping_pages(META_MAPPING(sbi),
 					old_blkaddr, old_blkaddr);
-	f2fs_set_data_blkaddr(dn);
+	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
 
 	/*
 	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
@@ -1199,10 +1234,10 @@ next_block:
 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
 							map->m_may_create) {
 			err = __allocate_data_block(&dn, map->m_seg_type);
-			if (!err) {
-				blkaddr = dn.data_blkaddr;
-				set_inode_flag(inode, FI_APPEND_WRITE);
-			}
+			if (err)
+				goto sync_out;
+			blkaddr = dn.data_blkaddr;
+			set_inode_flag(inode, FI_APPEND_WRITE);
 		}
 	} else {
 		if (create) {
@@ -1407,7 +1442,7 @@ static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
 	return __get_data_block(inode, iblock, bh_result, create,
 				F2FS_GET_BLOCK_DIO, NULL,
 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
-				true);
+				IS_SWAPFILE(inode) ? false : true);
 }
 
 static int get_data_block_dio(struct inode *inode, sector_t iblock,
@@ -1538,7 +1573,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			goto out;
 	}
 
-	if (f2fs_has_inline_data(inode)) {
+	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
 		if (ret != -EAGAIN)
 			goto out;
@@ -1691,8 +1726,8 @@ zero_out:
 		 * This page will go to BIO.  Do we need to send this
 		 * BIO off first?
 		 */
-		if (bio && (*last_block_in_bio != block_nr - 1 ||
-			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+		if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio,
+				*last_block_in_bio, block_nr)) {
 submit_and_realloc:
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
@@ -2590,9 +2625,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
-	err = f2fs_is_checkpoint_ready(sbi);
-	if (err)
+	if (!f2fs_is_checkpoint_ready(sbi)) {
+		err = -ENOSPC;
 		goto fail;
+	}
 
 	if ((f2fs_is_atomic_file(inode) &&
 			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
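To make the bio-merging refactor above easier to follow, here is a condensed, self-contained restatement of the three new helpers (page_is_mergeable, io_type_is_mergeable, io_is_mergeable) using simplified stand-in types rather than the real kernel structs; the F2FS_IO_ALIGNED vector-space check is omitted. This is an illustrative sketch, not kernel code:

```c
/*
 * Condensed restatement of the merge checks introduced in the diff above,
 * with simplified stand-in types instead of struct bio / struct
 * f2fs_bio_info / struct f2fs_io_info. Illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t block_t;

struct pending_io {
	block_t  last_blkaddr;	/* last block already added to the bio */
	int      dev_id;	/* which device the in-flight bio targets */
	unsigned op;		/* REQ_OP_*-style operation */
	unsigned op_flags;	/* REQ_*-style flag bits */
};

/* A page is mergeable only if it is physically contiguous with the last
 * block in the bio and lands on the same underlying device. */
static bool page_is_mergeable(const struct pending_io *io,
			      block_t cur_blkaddr, int cur_dev)
{
	if (io->last_blkaddr + 1 != cur_blkaddr)
		return false;
	return io->dev_id == cur_dev;
}

/* The merged write path additionally requires identical op and op_flags. */
static bool io_type_is_mergeable(const struct pending_io *io,
				 unsigned op, unsigned op_flags)
{
	return io->op == op && io->op_flags == op_flags;
}

static bool io_is_mergeable(const struct pending_io *io, block_t cur_blkaddr,
			    int cur_dev, unsigned op, unsigned op_flags)
{
	if (!page_is_mergeable(io, cur_blkaddr, cur_dev))
		return false;
	return io_type_is_mergeable(io, op, op_flags);
}

int main(void)
{
	struct pending_io io = {
		.last_blkaddr = 99, .dev_id = 0, .op = 1, .op_flags = 0,
	};

	/* block 100, same device, same op bits: mergeable (exit code 0) */
	return io_is_mergeable(&io, 100, 0, 1, 0) ? 0 : 1;
}
```

The split reflects how the call sites differ: the read path and f2fs_merge_page_bio() only need the contiguity/same-device check, while f2fs_submit_page_write() also has to match op and op_flags before reusing an in-flight bio.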