diff options
author | Jan Kara <jack@suse.cz> | 2017-11-16 02:34:44 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-16 03:21:03 +0100 |
commit | dc7f3e868a45de3cfcd3c849ad32331765547b57 (patch) | |
tree | 745f1ecaec1012a0bafe6bb2a95af32d38732cbf /fs/ext4/inode.c | |
parent | ceph: use pagevec_lookup_range_tag() (diff) | |
download | linux-dc7f3e868a45de3cfcd3c849ad32331765547b57.tar.xz linux-dc7f3e868a45de3cfcd3c849ad32331765547b57.zip |
ext4: use pagevec_lookup_range_tag()
We want only pages from given range in ext4_writepages(). Use
pagevec_lookup_range_tag() instead of pagevec_lookup_tag() and remove
unnecessary code.
Link: http://lkml.kernel.org/r/20171009151359.31984-5-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r-- | fs/ext4/inode.c | 14 |
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2633150e41b9..3d0708c91c5a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2620,8 +2620,8 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 	mpd->map.m_len = 0;
 	mpd->next_page = index;
 	while (index <= end) {
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			goto out;

@@ -2629,16 +2629,6 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			struct page *page = pvec.pages[i];

 			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping. However, page->index will not change
-			 * because we have a reference on the page.
-			 */
-			if (page->index > end)
-				goto out;
-
-			/*
 			 * Accumulated enough dirty pages? This doesn't apply
 			 * to WB_SYNC_ALL mode. For integrity sync we have to
 			 * keep going because someone may be concurrently