-rw-r--r--  include/linux/swap.h |  1
-rw-r--r--  mm/internal.h        |  1
-rw-r--r--  mm/swap.c            | 35
-rw-r--r--  mm/truncate.c        |  2
4 files changed, 20 insertions, 19 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 304f174b4d31..064e60e9f63f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -372,7 +372,6 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
-extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
diff --git a/mm/internal.h b/mm/internal.h
index 9c1959fff477..7c441f43ba31 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -66,6 +66,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
+void deactivate_file_folio(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
diff --git a/mm/swap.c b/mm/swap.c
index fc3b7989f5b2..65ec5cbab78b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -630,32 +630,33 @@ void lru_add_drain_cpu(int cpu)
 }
 
 /**
- * deactivate_file_page - forcefully deactivate a file page
- * @page: page to deactivate
+ * deactivate_file_folio() - Forcefully deactivate a file folio.
+ * @folio: Folio to deactivate.
  *
- * This function hints the VM that @page is a good reclaim candidate,
- * for example if its invalidation fails due to the page being dirty
+ * This function hints to the VM that @folio is a good reclaim candidate,
+ * for example if its invalidation fails due to the folio being dirty
  * or under writeback.
+ *
+ * Context: Caller holds a reference on the page.
  */
-void deactivate_file_page(struct page *page)
+void deactivate_file_folio(struct folio *folio)
 {
+	struct pagevec *pvec;
+
 	/*
-	 * In a workload with many unevictable page such as mprotect,
-	 * unevictable page deactivation for accelerating reclaim is pointless.
+	 * In a workload with many unevictable pages such as mprotect,
+	 * unevictable folio deactivation for accelerating reclaim is pointless.
 	 */
-	if (PageUnevictable(page))
+	if (folio_test_unevictable(folio))
 		return;
 
-	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec;
-
-		local_lock(&lru_pvecs.lock);
-		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
+	folio_get(folio);
+	local_lock(&lru_pvecs.lock);
+	pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
-		if (pagevec_add_and_need_flush(pvec, page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
-		local_unlock(&lru_pvecs.lock);
-	}
+	if (pagevec_add_and_need_flush(pvec, &folio->page))
+		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+	local_unlock(&lru_pvecs.lock);
 }
 
 /*
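
The kernel-doc and body above shift reference handling to the caller: deactivate_file_page() guarded itself with get_page_unless_zero(), while deactivate_file_folio() does an unconditional folio_get() and documents that the caller must already hold a reference. A minimal caller-side sketch of that contract (illustrative only, not part of this commit):

	/* Sketch: the caller's reference keeps the folio alive across the hint. */
	folio_get(folio);
	deactivate_file_folio(folio);	/* may park the folio on a per-CPU pagevec */
	folio_put(folio);		/* safe: the pagevec holds its own reference until drained */
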
diff --git a/mm/truncate.c b/mm/truncate.c
index a8b0243eadf6..9ed62cb3c503 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -527,7 +527,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 			 * of interest and try to speed up its reclaim.
 			 */
 			if (!ret) {
-				deactivate_file_page(&folio->page);
+				deactivate_file_folio(folio);
 				/* It is likely on the pagevec of a remote CPU */
 				if (nr_pagevec)
 					(*nr_pagevec)++;
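
The nr_pagevec counter above reports how many folios were merely queued on (possibly remote) per-CPU pagevecs rather than reclaimed immediately. A caller that wants the deactivation to take effect right away can drain those vectors with lru_add_drain_all(), whose declaration appears in the swap.h hunk at the top. A sketch of that pattern, assuming the invalidate_mapping_pagevec() wrapper that feeds __invalidate_mapping_pages() (the wrapper itself is not shown in this diff):

	unsigned long nr_pagevec = 0;

	invalidate_mapping_pagevec(mapping, start, end, &nr_pagevec);
	if (nr_pagevec)			/* some folios sit on remote CPUs' pagevecs */
		lru_add_drain_all();	/* flush the per-CPU LRU pagevecs everywhere */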