author     Matthew Wilcox (Oracle) <willy@infradead.org>   2020-06-02 06:46:40 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-02 19:59:06 +0200
commit     c1f6925e10912c7e329840387730049e5e1848c8
tree       9f7a33756deb90ea798f4ecc44f6b86c67bf1df2 /mm/readahead.c
parent     mm: remove 'page_offset' from readahead loop
mm: put readahead pages in cache earlier
When populating the page cache for readahead, mappings that use
->readpages must populate the page cache themselves as the pages are
passed on a linked list which would normally be used for the page
cache's LRU. For mappings that use ->readpage or the upcoming
->readahead method, we can put the pages into the page cache as soon as
they're allocated, which solves a race between readahead and direct IO.
It also lets us remove the gfp argument from read_pages().
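Condensed from the diff below, the per-page allocation step now looks roughly like this (a sketch with the error paths and readahead marking trimmed, not a drop-in snippet):

```c
page = __page_cache_alloc(gfp_mask);
if (!page)
	break;
if (mapping->a_ops->readpages) {
	/* Legacy path: defer cache insertion, pass pages on a list. */
	page->index = index + i;
	list_add(&page->lru, &page_pool);
} else if (add_to_page_cache_lru(page, mapping, index + i, gfp_mask) < 0) {
	/*
	 * Someone else (e.g. direct IO) inserted a page at this index
	 * first: submit what we have so far and skip this index.
	 */
	put_page(page);
	read_pages(&rac, &page_pool, true);
	continue;
}
rac._nr_pages++;
```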
Use the new readahead_page() API to implement the repeated calls to
->readpage(), just like most filesystems will.
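The loop in question, as it appears in read_pages() in the diff below; a filesystem implementing ->readahead would iterate the same way, consuming pages that are already locked and in the page cache:

```c
struct page *page;

while ((page = readahead_page(rac))) {
	aops->readpage(rac->file, page);
	put_page(page);	/* drop the reference taken at cache insertion */
}
```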
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-11-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--  mm/readahead.c | 46 ++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index ddc63d3b07b8..e52b3a7b9da5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -114,14 +114,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
-		gfp_t gfp)
+		bool skip_page)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
+	struct page *page;
 	struct blk_plug plug;
-	unsigned page_idx;
 
 	if (!readahead_count(rac))
-		return;
+		goto out;
 
 	blk_start_plug(&plug);
 
@@ -130,23 +130,23 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 				readahead_count(rac));
 		/* Clean up the remaining pages */
 		put_pages_list(pages);
-		goto out;
-	}
-
-	for (page_idx = 0; page_idx < readahead_count(rac); page_idx++) {
-		struct page *page = lru_to_page(pages);
-		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, rac->mapping, page->index,
-				gfp))
+		rac->_index += rac->_nr_pages;
+		rac->_nr_pages = 0;
+	} else {
+		while ((page = readahead_page(rac))) {
 			aops->readpage(rac->file, page);
-		put_page(page);
+			put_page(page);
+		}
 	}
 
-out:
 	blk_finish_plug(&plug);
 
 	BUG_ON(!list_empty(pages));
-	rac->_nr_pages = 0;
+	BUG_ON(readahead_count(rac));
+
+out:
+	if (skip_page)
+		rac->_index++;
 }
 
 /*
@@ -168,6 +168,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	struct readahead_control rac = {
 		.mapping = mapping,
 		.file = filp,
+		._index = index,
 	};
 	unsigned long i;
 
@@ -183,6 +184,8 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		if (index + i > end_index)
 			break;
 
+		BUG_ON(index + i != rac._index + rac._nr_pages);
+
 		page = xa_load(&mapping->i_pages, index + i);
 		if (page && !xa_is_value(page)) {
 			/*
@@ -190,15 +193,22 @@ void __do_page_cache_readahead(struct address_space *mapping,
 			 * contiguous pages before continuing with the next
 			 * batch.
 			 */
-			read_pages(&rac, &page_pool, gfp_mask);
+			read_pages(&rac, &page_pool, true);
 			continue;
 		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
-		page->index = index + i;
-		list_add(&page->lru, &page_pool);
+		if (mapping->a_ops->readpages) {
+			page->index = index + i;
+			list_add(&page->lru, &page_pool);
+		} else if (add_to_page_cache_lru(page, mapping, index + i,
+					gfp_mask) < 0) {
+			put_page(page);
+			read_pages(&rac, &page_pool, true);
+			continue;
+		}
 		if (i == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
 		rac._nr_pages++;
@@ -209,7 +219,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
 	 */
-	read_pages(&rac, &page_pool, gfp_mask);
+	read_pages(&rac, &page_pool, false);
 }
 
 /*
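readahead_page() itself is defined in include/linux/pagemap.h and is not part of this diff. As a rough mental model only, inferred from how rac->_index and rac->_nr_pages are maintained above (the real helper also handles compound-page batching), it behaves like:

```c
/*
 * Simplified model of readahead_page(), NOT the pagemap.h implementation
 * verbatim. The returned page carries the reference taken by
 * add_to_page_cache_lru(), which the caller drops with put_page().
 */
static inline struct page *readahead_page_model(struct readahead_control *rac)
{
	struct page *page;

	if (!rac->_nr_pages)
		return NULL;

	/* Pages were inserted into the cache contiguously from rac->_index. */
	page = xa_load(&rac->mapping->i_pages, rac->_index);
	rac->_index++;
	rac->_nr_pages--;
	return page;
}
```

This is consistent with the assertions in read_pages() above: once the while loop finishes, readahead_count(rac) is zero and rac->_index has advanced past every page that was submitted.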