author    Matthew Wilcox (Oracle) <willy@infradead.org>  2023-12-13 22:58:41 +0100
committer Andrew Morton <akpm@linux-foundation.org>      2023-12-29 20:58:32 +0100
commit    6e03492e9d288d9ce886064289e2768da5d7d967 (patch)
tree      2754538a468fc8fbb91fcd5abc1e35720b67ec8e
parent    mm: remove page_swap_info() (diff)
mm: return a folio from read_swap_cache_async()
The only two callers simply call put_page() on the page returned, so
they're happier calling folio_put().  Saves two calls to compound_head().

Link: https://lkml.kernel.org/r/20231213215842.671461-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
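For background on the compound_head() saving: put_page() must first resolve
the (possibly tail) page to its folio before it can drop the reference,
whereas folio_put() already has the folio in hand, so each converted call
site skips one such lookup. A simplified sketch of the relationship, not
the exact mainline source (which also handles device and zero pages):

	/* put_page() has to find the folio first ... */
	static inline void put_page(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* compound_head() lookup */

		folio_put(folio);	/* ... then drops the reference */
	}

A caller that already holds a struct folio can call folio_put() directly
and avoid the page_folio() step entirely, which is what this patch enables.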
Diffstat:
-rw-r--r--  mm/madvise.c    | 22
-rw-r--r--  mm/swap.h       |  7
-rw-r--r--  mm/swap_state.c |  8
3 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 6214a1ab5654..912155a94ed5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
for (addr = start; addr < end; addr += PAGE_SIZE) {
pte_t pte;
swp_entry_t entry;
- struct page *page;
+ struct folio *folio;
if (!ptep++) {
ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
pte_unmap_unlock(ptep, ptl);
ptep = NULL;
- page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+ folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
}
if (ptep)
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
{
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
pgoff_t end_index = linear_page_index(vma, end) - 1;
- struct page *page;
+ struct folio *folio;
struct swap_iocb *splug = NULL;
rcu_read_lock();
- xas_for_each(&xas, page, end_index) {
+ xas_for_each(&xas, folio, end_index) {
unsigned long addr;
swp_entry_t entry;
- if (!xa_is_value(page))
+ if (!xa_is_value(folio))
continue;
- entry = radix_to_swp_entry(page);
+ entry = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(entry))
continue;
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
xas_pause(&xas);
rcu_read_unlock();
- page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+ folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
rcu_read_lock();
}
diff --git a/mm/swap.h b/mm/swap.h
index 6bf25342589f..82c68ccb5ab1 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -46,10 +46,9 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
struct folio *filemap_get_incore_folio(struct address_space *mapping,
pgoff_t index);
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr,
- struct swap_iocb **plug);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug);
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d2fe70e307d9..97c8a950dd18 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -533,9 +533,9 @@ fail_put_swap:
* __read_swap_cache_async() call them and swap_read_folio() holds the
* swap cache folio lock.
*/
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug)
{
bool page_allocated;
struct mempolicy *mpol;
@@ -549,7 +549,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
if (page_allocated)
swap_read_folio(folio, false, plug);
- return folio_file_page(folio, swp_offset(entry));
+ return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
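
Note that the removed folio_file_page() conversion is still available to
callers: any future user that needs the precise subpage backing a given
swap entry (what the old return value provided) can perform the lookup
itself. A hypothetical caller sketch, not taken from this patch:

	struct folio *folio;
	struct page *page;

	folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
				      vma, addr, &splug);
	if (folio) {
		/* pick the page backing this swap offset within the folio */
		page = folio_file_page(folio, swp_offset(entry));
		/* ... use page ... */
		folio_put(folio);
	}

Neither existing caller needs this, which is why the patch is a pure win
for madvise.c and shmem swapin.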