author     Eric Sandeen <sandeen@redhat.com>        2010-10-28 03:30:13 +0200
committer  Theodore Ts'o <tytso@mit.edu>            2010-10-28 03:30:13 +0200
commit     72f84e6560d18d60a091df27edf81409be6641cb (patch)
tree       e2fa11476b074f8526d620820f8b2e86e8a11412 /fs
parent     ext4: implement writeback livelock avoidance using page tagging (diff)
ext4: update writeback_index based on last page scanned
As pointed out in a prior patch, updating the mapping's writeback_index
based on pages written isn't quite right; what the writeback index is
really supposed to reflect is the next page which should be scanned for
writeback during periodic flush.

As in write_cache_pages(), write_cache_pages_da() does this scanning for
us as we assemble the mpd for later writeout. If we keep track of the
next page after the current scan, we can easily update writeback_index
without worrying about pages written vs. pages skipped, etc.

Without this, an fsync will reset writeback_index to 0 (its starting
index) + however many pages it wrote, which can mess up the progress of
periodic flush.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
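For illustration only, here is a minimal userspace C sketch of the same
bookkeeping pattern; the names (scan_pages, page_dirty, resume, NPAGES) are
hypothetical and not part of the patch. The scan loop leaves *done_index
pointing at the next page to examine, whether the current page was written
or skipped, and the caller keeps that value as its resume point, much as
ext4_da_writepages now stores done_index in mapping->writeback_index.

/* Illustrative sketch only; not kernel code. */
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

static int page_dirty[NPAGES];  /* stand-in for the dirty tag in the page cache */

/*
 * Scan pages in [start, end] and "write back" the dirty ones.
 * *done_index always ends up pointing at the next page to examine,
 * regardless of how many pages were written or skipped.
 */
static int scan_pages(size_t start, size_t end, size_t *done_index)
{
        int written = 0;

        *done_index = start;                    /* nothing examined yet */
        for (size_t i = start; i <= end && i < NPAGES; i++) {
                *done_index = i + 1;            /* advance even for skipped pages */
                if (!page_dirty[i])
                        continue;
                page_dirty[i] = 0;              /* pretend we wrote it back */
                written++;
        }
        return written;
}

int main(void)
{
        size_t resume = 0;                      /* plays the role of writeback_index */

        page_dirty[3] = page_dirty[9] = 1;
        int written = scan_pages(0, NPAGES - 1, &resume);

        /* resume is 16 (next index to scan), not start + pages written */
        printf("wrote %d pages, resume scanning at %zu\n", written, resume);
        return 0;
}

The resume cursor therefore tracks scanning progress rather than the number
of pages written, which is exactly the distinction the patch draws for
writeback_index.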
Diffstat (limited to 'fs')
-rw-r--r--   fs/ext4/inode.c   14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c9ea95ba5fde..45fc5bdb7d67 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2800,12 +2800,13 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
  */
 static int write_cache_pages_da(struct address_space *mapping,
                                 struct writeback_control *wbc,
-                                struct mpage_da_data *mpd)
+                                struct mpage_da_data *mpd,
+                                pgoff_t *done_index)
 {
         int ret = 0;
         int done = 0;
         struct pagevec pvec;
-        int nr_pages;
+        unsigned nr_pages;
         pgoff_t index;
         pgoff_t end;            /* Inclusive */
         long nr_to_write = wbc->nr_to_write;
@@ -2820,6 +2821,7 @@ static int write_cache_pages_da(struct address_space *mapping,
         else
                 tag = PAGECACHE_TAG_DIRTY;

+        *done_index = index;
         while (!done && (index <= end)) {
                 int i;
@@ -2843,6 +2845,8 @@ static int write_cache_pages_da(struct address_space *mapping,
                                 break;
                         }

+                        *done_index = page->index + 1;
+
                         lock_page(page);

                         /*
@@ -2928,6 +2932,7 @@ static int ext4_da_writepages(struct address_space *mapping,
         long desired_nr_to_write, nr_to_writebump = 0;
         loff_t range_start = wbc->range_start;
         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+        pgoff_t done_index = 0;
         pgoff_t end;

         trace_ext4_da_writepages(inode, wbc);
@@ -3050,7 +3055,7 @@ retry:
                 mpd.io_done = 0;
                 mpd.pages_written = 0;
                 mpd.retval = 0;
-                ret = write_cache_pages_da(mapping, wbc, &mpd);
+                ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
                 /*
                  * If we have a contiguous extent of pages and we
                  * haven't done the I/O yet, map the blocks and submit
@@ -3104,14 +3109,13 @@ retry:
                          __func__, wbc->nr_to_write, ret);

         /* Update index */
-        index += pages_written;
         wbc->range_cyclic = range_cyclic;
         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                 /*
                  * set the writeback_index so that range_cyclic
                  * mode will write it back later
                  */
-                mapping->writeback_index = index;
+                mapping->writeback_index = done_index;

 out_writepages:
         wbc->nr_to_write -= nr_to_writebump;