author     David Hildenbrand <david@redhat.com>        2024-06-07 14:23:54 +0200
committer  Andrew Morton <akpm@linux-foundation.org>   2024-07-04 04:30:05 +0200
commit     2c1f057e5be63e890f2dd89e4c25ab5eef084a91 (patch)
tree       136c1865e2a48b2926c715f06891870c0f7aefaf
parent     fs/proc/task_mmu: don't indicate PM_MMAP_EXCLUSIVE without PM_PRESENT (diff)
fs/proc/task_mmu: properly detect PM_MMAP_EXCLUSIVE per page of PMD-mapped THPs
We added PM_MMAP_EXCLUSIVE in 2015 via commit 77bb499bb60f ("pagemap: add mmap-exclusive bit for marking pages mapped only here"), when THPs could not be partially mapped and page_mapcount() returned something that held for all pages of the THP.

In 2016, we added support for partially mapping THPs via commit 53f9263baba6 ("mm: rework mapcount accounting to enable 4k mapping of THPs"), but missed to likewise determine PM_MMAP_EXCLUSIVE per page of a PMD-mapped THP. Checking page_mapcount() on the head page does not tell the whole story: each individual page must be checked.

In a future without per-page mapcounts this will be different, but we'll change that to be consistent with PTE-mapped THPs once we deal with that.

Link: https://lkml.kernel.org/r/20240607122357.115423-4-david@redhat.com
Fixes: 53f9263baba6 ("mm: rework mapcount accounting to enable 4k mapping of THPs")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lance Yang <ioworker0@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
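For illustration only (not part of this patch): a minimal userspace sketch that walks /proc/self/pagemap and prints, for each 4 KiB page of a mapping, the "present" and "exclusively mapped" bits whose per-page accuracy for PMD-mapped THPs this fix restores. The bit positions (63 = present, 56 = exclusively mapped, i.e. PM_MMAP_EXCLUSIVE) follow Documentation/admin-guide/mm/pagemap.rst; the function name dump_exclusive_bits, the 2 MiB test mapping, and the MADV_HUGEPAGE hint are assumptions made up for this example.

/* Sketch: read one 64-bit pagemap entry per 4 KiB page and decode two bits. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int dump_exclusive_bits(void *start, size_t len)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uintptr_t addr = (uintptr_t)start;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;

	for (size_t off = 0; off < len; off += page_size, addr += page_size) {
		uint64_t entry;

		/* One u64 per virtual page, indexed by virtual page number. */
		if (pread(fd, &entry, sizeof(entry),
			  (off_t)(addr / page_size) * sizeof(entry)) !=
		    (ssize_t)sizeof(entry))
			break;
		printf("%#" PRIxPTR ": present=%d exclusive=%d\n", addr,
		       (int)((entry >> 63) & 1),	/* PM_PRESENT */
		       (int)((entry >> 56) & 1));	/* PM_MMAP_EXCLUSIVE */
	}

	close(fd);
	return 0;
}

int main(void)
{
	/* 2 MiB anonymous mapping; MADV_HUGEPAGE only hints at a THP. */
	size_t len = 2UL << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);
	memset(p, 1, len);		/* populate the range */
	return dump_exclusive_bits(p, len) ? 1 : 0;
}

With this patch applied, the exclusive bit is reported per 4 KiB entry of a PMD-mapped THP; previously, the head page's mapcount decided that bit for every entry in the PMD range.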
-rw-r--r--   fs/proc/task_mmu.c | 22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 22892cdb74ce..a45f2da0ada0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1477,6 +1477,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
+		unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;
		struct page *page = NULL;
@@ -1493,8 +1494,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			if (pmd_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			if (pm->show_pfn)
-				frame = pmd_pfn(pmd) +
-					((addr & ~PMD_MASK) >> PAGE_SHIFT);
+				frame = pmd_pfn(pmd) + idx;
		}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		else if (is_swap_pmd(pmd)) {
@@ -1503,11 +1503,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			if (pm->show_pfn) {
				if (is_pfn_swap_entry(entry))
-					offset = swp_offset_pfn(entry);
+					offset = swp_offset_pfn(entry) + idx;
				else
-					offset = swp_offset(entry);
-				offset = offset +
-					((addr & ~PMD_MASK) >> PAGE_SHIFT);
+					offset = swp_offset(entry) + idx;
				frame = swp_type(entry) |
					(offset << MAX_SWAPFILES_SHIFT);
			}
@@ -1523,12 +1521,16 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
		if (page && !PageAnon(page))
			flags |= PM_FILE;
-		if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
-			flags |= PM_MMAP_EXCLUSIVE;
-		for (; addr != end; addr += PAGE_SIZE) {
-			pagemap_entry_t pme = make_pme(frame, flags);
+		for (; addr != end; addr += PAGE_SIZE, idx++) {
+			unsigned long cur_flags = flags;
+			pagemap_entry_t pme;
+
+			if (page && (flags & PM_PRESENT) &&
+			    page_mapcount(page + idx) == 1)
+				cur_flags |= PM_MMAP_EXCLUSIVE;
+			pme = make_pme(frame, cur_flags);
			err = add_to_pagemap(&pme, pm);
			if (err)
				break;
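The new idx caches the subpage offset of addr within the PMD range, so page + idx names the exact 4 KiB subpage whose mapcount is consulted. As a standalone illustration of that arithmetic (assuming x86-64 geometry with 4 KiB pages and 2 MiB PMDs; the address value is made up, none of this is taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Assumed geometry for illustration: 4 KiB pages, 2 MiB PMD-mapped THPs. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

int main(void)
{
	uint64_t addr = 0x7f1234567000UL;	/* arbitrary address inside a THP */
	/* Same computation the patch caches in "idx": subpage within the PMD. */
	unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;

	printf("subpage index within the 2 MiB range: %u\n", idx);	/* prints 359 */
	return 0;
}

Walking addr from the start to the end of the PMD range advances idx from 0 to 511 under this assumed geometry, matching the per-page loop in the last hunk above.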