path: root/mm/memory.c
author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-09-02 21:46:42 +0200
committer  Andrew Morton <akpm@linux-foundation.org>      2022-10-03 23:02:53 +0200
commit     a160e5377b55bc5c1925a7456b656aabfc07261f (patch)
tree       63c68b1ff88cabb8114a1ace50e360652e7145d5 /mm/memory.c
parent     ksm: use a folio in replace_page() (diff)
mm: convert do_swap_page() to use folio_free_swap()
Also convert should_try_to_free_swap() to use a folio. This removes a few
calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-47-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
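[Editorial note on why the conversion removes compound_head() calls: page-flag
helpers such as PageSwapCache() and PageMlocked() act on the head page of a
compound page, so each call must first resolve the page's head. A folio is by
definition a head page, so folio_test_swapcache() and friends can test the bit
directly. Below is a toy userspace model of that difference, not the kernel's
real page-flags machinery (the kernel encodes head-page information in
page->compound_head rather than in a plain pointer):

#include <stdbool.h>
#include <stdio.h>

struct page {
        unsigned long flags;
        struct page *head;      /* NULL for a head page, else the head */
};

struct folio {
        struct page page;       /* a folio is always a head page */
};

/* Page-based test: must resolve the head page first, on every call. */
static struct page *model_compound_head(struct page *page)
{
        return page->head ? page->head : page;
}

static bool page_test_swapcache_model(struct page *page)
{
        return model_compound_head(page)->flags & 1UL;  /* extra lookup */
}

/* Folio-based test: the folio is the head page, so test directly. */
static bool folio_test_swapcache_model(struct folio *folio)
{
        return folio->page.flags & 1UL;                 /* no lookup */
}

int main(void)
{
        struct folio f = { .page = { .flags = 1UL, .head = NULL } };
        struct page tail = { .flags = 0, .head = &f.page };

        /* Both report the flag, but only the page path pays for the
         * head-page resolution. */
        printf("%d %d\n", page_test_swapcache_model(&tail),
               folio_test_swapcache_model(&f));
        return 0;
}
]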
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2f1397b7c77d..b8e4dae18ac1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3641,14 +3641,14 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	return 0;
 }
 
-static inline bool should_try_to_free_swap(struct page *page,
+static inline bool should_try_to_free_swap(struct folio *folio,
 					   struct vm_area_struct *vma,
 					   unsigned int fault_flags)
 {
-	if (!PageSwapCache(page))
+	if (!folio_test_swapcache(folio))
 		return false;
-	if (mem_cgroup_swap_full(page) || (vma->vm_flags & VM_LOCKED) ||
-	    PageMlocked(page))
+	if (mem_cgroup_swap_full(&folio->page) || (vma->vm_flags & VM_LOCKED) ||
+	    folio_test_mlocked(folio))
 		return true;
 	/*
 	 * If we want to map a page that's in the swapcache writable, we
@@ -3656,8 +3656,8 @@ static inline bool should_try_to_free_swap(struct page *page,
 	 * user. Try freeing the swapcache to get rid of the swapcache
 	 * reference only in case it's likely that we'll be the exlusive user.
 	 */
-	return (fault_flags & FAULT_FLAG_WRITE) && !PageKsm(page) &&
-		page_count(page) == 2;
+	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
+		folio_ref_count(folio) == 2;
 }
 
 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
@@ -3949,8 +3949,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * yet.
 	 */
 	swap_free(entry);
-	if (should_try_to_free_swap(page, vma, vmf->flags))
-		try_to_free_swap(page);
+	if (should_try_to_free_swap(folio, vma, vmf->flags))
+		folio_free_swap(folio);
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
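[Editorial note on the call-site shape: the do_swap_page() hunk shows the
pattern this series standardizes on: resolve the folio once, then stay in the
folio API. A minimal sketch of that pattern (a fragment, not buildable on its
own; page_folio() and folio_free_swap() are the kernel helpers, the other
names come from the hunk above):

	/* Resolve the head page once; later folio_* calls need no
	 * further compound_head() lookups. */
	struct folio *folio = page_folio(page);

	swap_free(entry);
	if (should_try_to_free_swap(folio, vma, vmf->flags))
		folio_free_swap(folio);	/* was try_to_free_swap(page) */

The folio_ref_count(folio) == 2 test in should_try_to_free_swap() encodes the
"likely exclusive" heuristic from the comment: one reference is held by the
swap cache and one by this fault path, so a count of exactly two suggests no
third party would lose the folio if the swap-cache reference were dropped.]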