author		Matthew Wilcox (Oracle) <willy@infradead.org>	2020-06-02 06:46:51 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-02 19:59:06 +0200
commit		2c684234d36f7e8c80414e4a772911d407e821fa (patch)
tree		fa1542af0f9bc1c502e81dae66d17e4c7674d237 /mm/readahead.c
parent		mm: move end_index check out of readahead loop (diff)
mm: add page_cache_readahead_unbounded
ext4 and f2fs have duplicated the guts of the readahead code so they can
read past i_size. Instead, separate out the guts of the readahead code
so they can call it directly.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-14-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	68
1 file changed, 47 insertions(+), 21 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 998fdd23c0b1..ae231a5312cb 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -156,37 +156,34 @@ out:
 	rac->_index++;
 }
 
-/*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
- * the pages first, then submits them for I/O. This avoids the very bad
- * behaviour which would occur if page allocations are causing VM writeback.
- * We really don't want to intermingle reads and writes like that.
+/**
+ * page_cache_readahead_unbounded - Start unchecked readahead.
+ * @mapping: File address space.
+ * @file: This instance of the open file; used for authentication.
+ * @index: First page index to read.
+ * @nr_to_read: The number of pages to read.
+ * @lookahead_size: Where to start the next readahead.
+ *
+ * This function is for filesystems to call when they want to start
+ * readahead beyond a file's stated i_size.  This is almost certainly
+ * not the function you want to call.  Use page_cache_async_readahead()
+ * or page_cache_sync_readahead() instead.
+ *
+ * Context: File is referenced by caller.  Mutexes may be held by caller.
+ * May sleep, but will not reenter filesystem to reclaim memory.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t index, unsigned long nr_to_read,
+void page_cache_readahead_unbounded(struct address_space *mapping,
+		struct file *file, pgoff_t index, unsigned long nr_to_read,
 		unsigned long lookahead_size)
 {
-	struct inode *inode = mapping->host;
 	LIST_HEAD(page_pool);
-	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 	struct readahead_control rac = {
 		.mapping = mapping,
-		.file = filp,
+		.file = file,
 		._index = index,
 	};
 	unsigned long i;
-	pgoff_t end_index;	/* The last page we want to read */
-
-	if (isize == 0)
-		return;
-
-	end_index = (isize - 1) >> PAGE_SHIFT;
-	if (index > end_index)
-		return;
-	/* Don't read past the page containing the last byte of the file */
-	if (nr_to_read > end_index - index)
-		nr_to_read = end_index - index + 1;
 
 	/*
 	 * Preallocate as many pages as we will need.
@@ -230,6 +227,35 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	 */
 	read_pages(&rac, &page_pool, false);
 }
+EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
+
+/*
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O.  This avoids the very bad
+ * behaviour which would occur if page allocations are causing VM writeback.
+ * We really don't want to intermingle reads and writes like that.
+ */
+void __do_page_cache_readahead(struct address_space *mapping,
+		struct file *file, pgoff_t index, unsigned long nr_to_read,
+		unsigned long lookahead_size)
+{
+	struct inode *inode = mapping->host;
+	loff_t isize = i_size_read(inode);
+	pgoff_t end_index;	/* The last page we want to read */
+
+	if (isize == 0)
+		return;
+
+	end_index = (isize - 1) >> PAGE_SHIFT;
+	if (index > end_index)
+		return;
+	/* Don't read past the page containing the last byte of the file */
+	if (nr_to_read > end_index - index)
+		nr_to_read = end_index - index + 1;
+
+	page_cache_readahead_unbounded(mapping, file, index, nr_to_read,
+			lookahead_size);
+}
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
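
The kerneldoc above stresses that page_cache_readahead_unbounded() is not a general-purpose entry point; it exists so that filesystems can populate the page cache beyond i_size. As a rough illustration of the intended call pattern, here is a minimal sketch modeled on the fs-verity Merkle-tree readers in ext4 and f2fs that this helper was extracted for. The function my_read_merkle_page() is hypothetical; find_get_page_flags(), read_mapping_page(), and the NULL file argument reflect how the in-tree callers of this era use the API.

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller: fetch one page of metadata stored past EOF,
 * kicking off unbounded readahead for the pages that follow it.
 */
static struct page *my_read_merkle_page(struct inode *inode, pgoff_t index,
					unsigned long num_ra_pages)
{
	struct page *page;

	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			page_cache_readahead_unbounded(inode->i_mapping,
					NULL, index, num_ra_pages, 0);
		/* Read (or re-read) the page synchronously. */
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}

Because the ordinary entry points clamp nr_to_read at end_index, as __do_page_cache_readahead() still does above, only a caller like this can reach pages beyond the one containing the last byte of the file.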