author     Michal Hocko <mhocko@suse.com>                  2016-07-27 00:24:53 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-27 01:19:19 +0200
commit     8a5c743e308dd2b90ad10d1faaa7a1b09173a132
tree       ba679ecefc77b4a80bd35b00e55ddc1457b14e07 /mm/readahead.c
parent     mm, oom_reaper: make sure that mmput_async is called only when memory was reaped
mm, memcg: use consistent gfp flags during readahead
Vladimir has noticed that we might declare memcg OOM even during
readahead, because read_pages() only uses GFP_KERNEL (with the
mapping_gfp restriction) while __do_page_cache_readahead() uses
page_cache_alloc_readahead(), which adds __GFP_NORETRY to prevent
OOMs. This gfp mask discrepancy is unfortunate and easily fixable.
Drop page_cache_alloc_readahead(), which has only one user, move the
gfp_mask logic into a new readahead_gfp_mask() helper, and propagate
this mask from __do_page_cache_readahead() down to read_pages().
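The helper itself is added to include/linux/pagemap.h and is therefore
not visible in the diffstat below, which is limited to mm/readahead.c.
A minimal sketch of its shape at the time of this commit (the exact
flag set here is a best-effort reconstruction, not quoted from the
patch):

	static inline gfp_t readahead_gfp_mask(struct address_space *x)
	{
		/*
		 * Readahead is best-effort: give up early instead of
		 * retrying hard, stay quiet on allocation failure, and
		 * prefer cache-cold pages.
		 */
		return mapping_gfp_mask(x) |
			__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
	}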
This alone would have only a very limited impact, as most filesystems
implement ->readpages and the common implementation, mpage_readpages(),
uses GFP_KERNEL (with the mapping_gfp restriction) again. We can tell
it to use readahead_gfp_mask() instead, as this function too is only
called during readahead. The same applies to read_cache_pages().
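Schematically, the mpage_readpages() side of the change (in fs/mpage.c,
also outside this diffstat) picks the mask up once and uses it for every
page it inserts; a sketch, not the verbatim tree code:

	/* in mpage_readpages() */
	gfp_t gfp = readahead_gfp_mask(mapping);

	/* ... then, for each page on the readahead list ... */
	if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
		/*
		 * Page is now in the page cache; build and submit the
		 * bio, passing gfp down so the bio allocation in
		 * mpage_alloc() is restricted by the same mask.
		 */
	}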
ext4 has its own ext4_mpage_readpages(), but the path where pages !=
NULL can use the same gfp mask. Btrfs, cifs, f2fs and orangefs follow
a pattern very similar to mpage_readpages(), so the same change can be
applied to them as well; see the sketch below.
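All of these converts reduce to the same shape; schematically
(foo_readpages() is a hypothetical stand-in for the btrfs/cifs/f2fs/
orangefs implementations, not code from any of those trees):

	static int foo_readpages(struct file *file,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned nr_pages)
	{
		gfp_t gfp = readahead_gfp_mask(mapping);

		while (!list_empty(pages)) {
			struct page *page = lru_to_page(pages);

			list_del(&page->lru);
			if (!add_to_page_cache_lru(page, mapping,
						   page->index, gfp)) {
				/* queue the read for this page */
			}
			put_page(page);
		}
		return 0;
	}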
[akpm@linux-foundation.org: coding-style fixes]
[mhocko@suse.com: restrict gfp mask in mpage_alloc]
Link: http://lkml.kernel.org/r/20160610074223.GC32285@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/1465301556-26431-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Cc: Chris Mason <clm@fb.com>
Cc: Steve French <sfrench@samba.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Cc: Mike Marshall <hubcap@omnibond.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Changman Lee <cm224.lee@samsung.com>
Cc: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
 mm/readahead.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 40be3ae0afe3..65ec288dc057 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -89,7 +89,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+				readahead_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -108,7 +108,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static int read_pages(struct address_space *mapping, struct file *filp,
-		struct list_head *pages, unsigned nr_pages)
+		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
 {
 	struct blk_plug plug;
 	unsigned page_idx;
@@ -126,10 +126,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = lru_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
 			mapping->a_ops->readpage(filp, page);
-		}
 		put_page(page);
 	}
 	ret = 0;
@@ -159,6 +157,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	int page_idx;
 	int ret = 0;
 	loff_t isize = i_size_read(inode);
+	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
 	if (isize == 0)
 		goto out;
@@ -180,7 +179,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		if (page && !radix_tree_exceptional_entry(page))
 			continue;
 
-		page = page_cache_alloc_readahead(mapping);
+		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
 		page->index = page_offset;
@@ -196,7 +195,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 * will then handle the error.
 	 */
 	if (ret)
-		read_pages(mapping, filp, &page_pool, ret);
+		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
 out:
 	return ret;