author     Jan Kara <jack@suse.cz>          2020-05-25 10:12:15 +0200
committer  Theodore Ts'o <tytso@mit.edu>    2020-06-04 05:16:56 +0200
commit     6b8ed62008a49751fc71fefd2a4f89202a7c2d4d (patch)
tree       b951c494c7e143086200904852bc48edae3c3140 /fs/ext4/inode.c
parent     ext4: don't block for O_DIRECT if IOCB_NOWAIT is set (diff)
download   linux-6b8ed62008a49751fc71fefd2a4f89202a7c2d4d.tar.xz
           linux-6b8ed62008a49751fc71fefd2a4f89202a7c2d4d.zip
ext4: avoid unnecessary transaction starts during writeback
ext4_writepages() currently works in a loop like:

  start a transaction
  scan inode for pages to write
  map and submit these pages
  stop the transaction

This loop results in starting a transaction once more than is needed, because in the last iteration we start a transaction only to scan the inode and find there are no pages to write. This can be a significant increase in the number of transaction starts for single-extent files or files that have all blocks already mapped. Furthermore, we already know from the previous iteration whether there are more pages to write or not. So propagate that information from mpage_prepare_extent_to_map() and avoid unnecessary looping in case there are no more pages to write.

Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20200525081215.29451-1-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
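For illustration only (not part of the commit, and not ext4's real API): a minimal, self-contained C sketch of the idea, where scan_and_submit() stands in for mpage_prepare_extent_to_map() and the transaction/context names are made up. It shows how letting the scan step set a scanned_until_end flag allows the caller to stop without starting one more transaction just to discover there is nothing left to write.

/*
 * Standalone sketch (not actual ext4 code): models how propagating a
 * "scanned_until_end" flag from the scan step lets the driver loop skip
 * the final, useless transaction start. All names and the fake
 * transaction/scan helpers below are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct writeback_ctx {
	int next_page;          /* next page index to scan */
	int last_page;          /* end of the range we were asked to write */
	bool scanned_until_end; /* set by the scan step when nothing is left */
};

/* Pretend transaction start/stop; in ext4 these would be journal handles. */
static int transaction_starts;
static void start_transaction(void) { transaction_starts++; }
static void stop_transaction(void)  { }

/* Scan step: returns the number of dirty pages found and submitted. */
static int scan_and_submit(struct writeback_ctx *ctx)
{
	int found = 0;

	/* Toy model: every page up to last_page is dirty; at most 4 per pass. */
	while (ctx->next_page <= ctx->last_page && found < 4) {
		ctx->next_page++;
		found++;
	}
	/* Key point: report "nothing more to scan" back to the caller. */
	if (ctx->next_page > ctx->last_page)
		ctx->scanned_until_end = true;
	return found;
}

int main(void)
{
	struct writeback_ctx ctx = {
		.next_page = 0,
		.last_page = 9,
		.scanned_until_end = false,
	};

	/*
	 * Old shape: loop until a scan finds no pages, which costs one extra
	 * transaction start. New shape (below): stop as soon as the previous
	 * scan already reported that it reached the end of the range.
	 */
	while (!ctx.scanned_until_end) {
		start_transaction();
		scan_and_submit(&ctx);
		stop_transaction();
	}

	printf("transaction starts: %d\n", transaction_starts); /* 3, not 4 */
	return 0;
}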
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--   fs/ext4/inode.c   31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e7bf9388538b..6694f0c8e0f7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1534,6 +1534,7 @@ struct mpage_da_data {
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
unsigned int do_map:1;
+ unsigned int scanned_until_end:1;
};
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
@@ -1549,6 +1550,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
if (mpd->first_page >= mpd->next_page)
return;
+ mpd->scanned_until_end = 0;
index = mpd->first_page;
end = mpd->next_page - 1;
if (invalidate) {
@@ -2195,7 +2197,11 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
if (err < 0)
return err;
}
- return lblk < blocks;
+ if (lblk >= blocks) {
+ mpd->scanned_until_end = 1;
+ return 0;
+ }
+ return 1;
}
/*
@@ -2553,7 +2559,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
- goto out;
+ break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2608,6 +2614,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
pagevec_release(&pvec);
cond_resched();
}
+ mpd->scanned_until_end = 1;
return 0;
out:
pagevec_release(&pvec);
@@ -2626,7 +2633,6 @@ static int ext4_writepages(struct address_space *mapping,
struct inode *inode = mapping->host;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
- bool done;
struct blk_plug plug;
bool give_up_on_write = false;
@@ -2712,7 +2718,6 @@ static int ext4_writepages(struct address_space *mapping,
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
- done = false;
blk_start_plug(&plug);
/*
@@ -2722,6 +2727,7 @@ retry:
* started.
*/
mpd.do_map = 0;
+ mpd.scanned_until_end = 0;
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
ret = -ENOMEM;
@@ -2737,7 +2743,7 @@ retry:
if (ret < 0)
goto unplug;
- while (!done && mpd.first_page <= mpd.last_page) {
+ while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
@@ -2772,20 +2778,9 @@ retry:
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
- if (!ret) {
- if (mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd,
+ if (!ret && mpd.map.m_len)
+ ret = mpage_map_and_submit_extent(handle, &mpd,
&give_up_on_write);
- else {
- /*
- * We scanned the whole range (or exhausted
- * nr_to_write), submitted what was mapped and
- * didn't find anything needing mapping. We are
- * done.
- */
- done = true;
- }
- }
/*
* Caution: If the handle is synchronous,
* ext4_journal_stop() can wait for transaction commit