author     Barry Song <v-songbaohua@oppo.com>         2024-09-09 01:21:17 +0200
committer  Andrew Morton <akpm@linux-foundation.org>  2024-09-17 10:07:01 +0200
commit     9d57090e73d5e00e946d7fdd6398c2c0bc3b5525 (patch)
tree       573827b214d1a6796015a8436005239521150c80 /mm/page_io.c
parent     mm/debug_vm_pgtable: Use pxdp_get() for accessing page table entries (diff)
mm: fix swap_read_folio_zeromap() for large folios with partial zeromap
Patch series "mm: enable large folios swap-in support", v9.

Currently, we support mTHP swapout but not swapin. This means that once mTHP is swapped out, it will come back as small folios when swapped in. This is particularly detrimental for devices like Android, where more than half of the memory is in swap.

The lack of mTHP swapin functionality makes mTHP a showstopper in scenarios that heavily rely on swap. This patchset introduces mTHP swap-in support. It starts with synchronous devices similar to zRAM, aiming to benefit as many users as possible with minimal changes.

This patch (of 3):

There could be a corner case where the first entry is non-zeromap, but a subsequent entry is zeromap. In this case, we should not let swap_read_folio_zeromap() return false since we will still read corrupted data.

Additionally, the iteration of test_bit() is unnecessary and can be replaced with bitmap operations, which are more efficient.

We can adopt the style of swap_pte_batch() and folio_pte_batch() to introduce swap_zeromap_batch(), which seems to provide the greatest flexibility for the caller. This approach allows the caller to either check if the zeromap status of all entries is consistent or determine the number of contiguous entries with the same status.

Since swap_read_folio() can't handle reading a large folio that's partially zeromap and partially non-zeromap, we've moved the code to mm/swap.h so that others, like those working on swap-in, can access it.

Link: https://lkml.kernel.org/r/20240908232119.2157-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20240908232119.2157-2-21cnbao@gmail.com
Fixes: 0ca0c24e3211 ("mm: store zero pages to be swapped out in a bitmap")
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Usama Arif <usamaarif642@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chuanhua Han <hanchuanhua@oppo.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
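For context, here is a minimal sketch of the swap_zeromap_batch() helper described above, roughly as the series adds it to mm/swap.h. The body below is an illustration reconstructed from the description (test the first offset, then scan the zeromap with find_next_bit()/find_next_zero_bit()), not a verbatim quote of the patch:

/*
 * Sketch: return how many of the @max_nr swap entries starting at @entry
 * share the zeromap status of the first one, reporting that status via
 * @is_zeromap.
 */
static inline unsigned int swap_zeromap_batch(swp_entry_t entry, int max_nr,
					      bool *is_zeromap)
{
	struct swap_info_struct *sis = swp_swap_info(entry);
	unsigned long start = swp_offset(entry);
	unsigned long end = start + max_nr;
	bool first_bit;

	first_bit = test_bit(start, sis->zeromap);
	if (is_zeromap)
		*is_zeromap = first_bit;

	if (max_nr <= 1)
		return max_nr;

	/* Length of the run of entries with the same zeromap status. */
	if (first_bit)
		return find_next_zero_bit(sis->zeromap, end, start) - start;
	else
		return find_next_bit(sis->zeromap, end, start) - start;
}

swap_read_folio_zeromap() below only needs the consistency check (returned count == number of pages in the folio), while a swap-in path can use the returned length to batch contiguous entries of the same status.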
Diffstat (limited to 'mm/page_io.c')
-rw-r--r--   mm/page_io.c   32
1 file changed, 7 insertions(+), 25 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index b6f1519d63b0..78bc88acee79 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -227,26 +227,6 @@ static void swap_zeromap_folio_clear(struct folio *folio)
 }
 
-/*
- * Return the index of the first subpage which is not zero-filled
- * according to swap_info_struct->zeromap.
- * If all pages are zero-filled according to zeromap, it will return
- * folio_nr_pages(folio).
- */
-static unsigned int swap_zeromap_folio_test(struct folio *folio)
-{
-	struct swap_info_struct *sis = swp_swap_info(folio->swap);
-	swp_entry_t entry;
-	unsigned int i;
-
-	for (i = 0; i < folio_nr_pages(folio); i++) {
-		entry = page_swap_entry(folio_page(folio, i));
-		if (!test_bit(swp_offset(entry), sis->zeromap))
-			return i;
-	}
-	return i;
-}
-
 /*
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
@@ -522,19 +502,21 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 
 static bool swap_read_folio_zeromap(struct folio *folio)
 {
-	unsigned int idx = swap_zeromap_folio_test(folio);
-
-	if (idx == 0)
-		return false;
+	int nr_pages = folio_nr_pages(folio);
+	bool is_zeromap;
 
 	/*
 	 * Swapping in a large folio that is partially in the zeromap is not
 	 * currently handled. Return true without marking the folio uptodate so
 	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
 	 */
-	if (WARN_ON_ONCE(idx < folio_nr_pages(folio)))
+	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
+			&is_zeromap) != nr_pages))
 		return true;
 
+	if (!is_zeromap)
+		return false;
+
 	folio_zero_range(folio, 0, folio_size(folio));
 	folio_mark_uptodate(folio);
 	return true;
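For readability, swap_read_folio_zeromap() as it reads with this hunk applied (assembled from the context and '+' lines above; the closing brace falls outside the hunk shown):

static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	/* None of the entries is in the zeromap: read from the device. */
	if (!is_zeromap)
		return false;

	/* All entries are zero-filled: satisfy the read without I/O. */
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}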