author     Minchan Kim <minchan@kernel.org>                  2017-07-07 00:37:24 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-07-07 01:24:31 +0200
commit     0f0746589e4be071a8f890b2035c97c30c7a4e16
tree       fa4613c9460f97cb64176ad8fb93d3fb2b30ad36
parent     mm, THP, swap: unify swap slot free functions to put_swap_page
download   linux-0f0746589e4be071a8f890b2035c97c30c7a4e16.tar.xz
           linux-0f0746589e4be071a8f890b2035c97c30c7a4e16.zip
mm, THP, swap: move anonymous THP split logic to vmscan
add_to_swap() aims to allocate swap space (i.e., a swap slot plus a swap
cache entry).  If that allocation fails for a THP, whether from lack of
contiguous swap space or because THP swapout was attempted on HDD swap,
it is more natural for the *caller*, rather than add_to_swap() itself,
to split the THP and retry with the base pages.
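In outline, the new division of labor looks like the sketch below.  This
is a simplified illustration distilled from the diff that follows, not
the literal kernel code: the memcg swap charge and locking are elided,
and the gfp flags on the add_to_swap_cache() call reflect the kernel
around this commit rather than anything visible in the hunks below.

/*
 * Simplified sketch only.  add_to_swap() now just allocates swap space
 * and populates the swap cache; on failure it releases the slot(s) and
 * returns 0, without trying to split a THP itself.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;

	entry = get_swap_page(page);	/* may hand back a huge swap cluster */
	if (!entry.val)
		return 0;

	if (add_to_swap_cache(page, entry,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
		put_swap_page(page, entry);	/* free slot(s); caller decides what's next */
		return 0;
	}
	return 1;
}

/* The caller, shrink_page_list() in mm/vmscan.c, now owns the split-and-retry policy: */
	if (!add_to_swap(page)) {
		if (!PageTransHuge(page))
			goto activate_locked;
		/* split the THP and retry with its base pages */
		if (split_huge_page_to_list(page, page_list))
			goto activate_locked;
		if (!add_to_swap(page))
			goto activate_locked;
	}

The retry after a successful split operates on base pages, so the
sequence cannot recurse; the HDD-swap case mentioned above simply takes
this split path on its first failure.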
Link: http://lkml.kernel.org/r/20170515112522.32457-4-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/swap.h |  4 ++--
-rw-r--r--  mm/swap_state.c      | 23 ++++++-----------------
-rw-r--r--  mm/vmscan.c          | 17 ++++++++++++++++-
3 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ead6fd7966b4..5ab1c98c7d27 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -353,7 +353,7 @@ extern struct address_space *swapper_spaces[];
 		>> SWAP_ADDRESS_SPACE_SHIFT])
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *, struct list_head *list);
+extern int add_to_swap(struct page *page);
 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
 extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
 extern void __delete_from_swap_cache(struct page *);
@@ -473,7 +473,7 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
 	return NULL;
 }
 
-static inline int add_to_swap(struct page *page, struct list_head *list)
+static inline int add_to_swap(struct page *page)
 {
 	return 0;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0ad214d7a7ad..9c71b6b2562f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -184,7 +184,7 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock.
  */
-int add_to_swap(struct page *page, struct list_head *list)
+int add_to_swap(struct page *page)
 {
 	swp_entry_t entry;
 	int err;
@@ -192,12 +192,12 @@ int add_to_swap(struct page *page, struct list_head *list)
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageUptodate(page), page);
 
-retry:
 	entry = get_swap_page(page);
 	if (!entry.val)
-		goto fail;
+		return 0;
+
 	if (mem_cgroup_try_charge_swap(page, entry))
-		goto fail_free;
+		goto fail;
 
 	/*
 	 * Radix-tree node allocations from PF_MEMALLOC contexts could
@@ -218,23 +218,12 @@ retry:
 	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
 	 * clear SWAP_HAS_CACHE flag.
	 */
-		goto fail_free;
-
-	if (PageTransHuge(page)) {
-		err = split_huge_page_to_list(page, list);
-		if (err) {
-			delete_from_swap_cache(page);
-			return 0;
-		}
-	}
+		goto fail;
 
 	return 1;
 
-fail_free:
-	put_swap_page(page, entry);
 fail:
-	if (PageTransHuge(page) && !split_huge_page_to_list(page, list))
-		goto retry;
+	put_swap_page(page, entry);
 	return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb7c154a4a9d..729e37f02de6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1125,8 +1125,23 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		    !PageSwapCache(page)) {
 			if (!(sc->gfp_mask & __GFP_IO))
 				goto keep_locked;
-			if (!add_to_swap(page, page_list))
+			if (!add_to_swap(page)) {
+				if (!PageTransHuge(page))
+					goto activate_locked;
+				/* Split THP and swap individual base pages */
+				if (split_huge_page_to_list(page, page_list))
+					goto activate_locked;
+				if (!add_to_swap(page))
+					goto activate_locked;
+			}
+
+			/* XXX: We don't support THP writes */
+			if (PageTransHuge(page) &&
+				  split_huge_page_to_list(page, page_list)) {
+				delete_from_swap_cache(page);
 				goto activate_locked;
+			}
+
 			may_enter_fs = 1;
 
 			/* Adding to swap updated mapping */