path: root/mm/huge_memory.c
author     Peter Xu <peterx@redhat.com>                 2022-08-11 18:13:28 +0200
committer  Andrew Morton <akpm@linux-foundation.org>    2022-09-27 04:46:05 +0200
commit     0ccf7f168e17bb7eb5a322397ba5a841f4fbaccb (patch)
tree       c7f8491234fc2a42dce4f1071fc346168732225a /mm/huge_memory.c
parent     mm/swap: add swp_offset_pfn() to fetch PFN from swap entry (diff)
mm/thp: carry over dirty bit when thp splits on pmd
Carry over the dirty bit from pmd to pte when a huge pmd splits. It
shouldn't be a correctness issue, since when pmd_dirty() is set we'll have
the page marked dirty anyway; however, having the dirty bit carried over
helps the next initial writes to the split ptes on some archs like x86.

Link: https://lkml.kernel.org/r/20220811161331.37055-5-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Huang Ying <ying.huang@intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
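For readers without the kernel context, here is a minimal, self-contained
user-space sketch of what "carrying over the dirty bit" means. It only models
the flag copy: struct entry, split_huge_entry() and NR_SMALL are made up for
illustration and are not kernel APIs, and the real split also involves page
table locking, TLB flushing and the migration/swap cases handled in the diff
below.

/*
 * Illustrative model only, not kernel code: when one huge entry is split
 * into many small entries, copy the dirty state into every small entry so
 * the first write through a small entry does not have to set it again.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_SMALL 512    /* one 2M huge page covers 512 4K pages on x86-64 */

struct entry {
        bool present;
        bool write;
        bool young;
        bool dirty;
        bool soft_dirty;
};

static void split_huge_entry(const struct entry *huge,
                             struct entry small[NR_SMALL])
{
        for (int i = 0; i < NR_SMALL; i++) {
                small[i] = (struct entry){ .present = true };
                small[i].write      = huge->write;
                small[i].young      = huge->young;
                /* the point of this patch: do not lose the dirty bit on split */
                small[i].dirty      = huge->dirty;
                small[i].soft_dirty = huge->soft_dirty;
        }
}

int main(void)
{
        struct entry huge = { .present = true, .write = true,
                              .young = true, .dirty = true };
        struct entry small[NR_SMALL];

        split_huge_entry(&huge, small);
        printf("small[0].dirty after split: %d\n", small[0].dirty);
        return 0;
}

In the real __split_huge_pmd_locked() below, the same idea is expressed by
remembering pmd_dirty(old_pmd) in a local dirty flag and applying
pte_mkdirty() to each newly built pte.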
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7bf2299cb24b..b4666774abf0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2037,7 +2037,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
         pgtable_t pgtable;
         pmd_t old_pmd, _pmd;
         bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
-        bool anon_exclusive = false;
+        bool anon_exclusive = false, dirty = false;
         unsigned long addr;
         int i;
@@ -2126,8 +2126,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                 uffd_wp = pmd_swp_uffd_wp(old_pmd);
         } else {
                 page = pmd_page(old_pmd);
-                if (pmd_dirty(old_pmd))
+                if (pmd_dirty(old_pmd)) {
+                        dirty = true;
                         SetPageDirty(page);
+                }
                 write = pmd_write(old_pmd);
                 young = pmd_young(old_pmd);
                 soft_dirty = pmd_soft_dirty(old_pmd);
@@ -2195,6 +2197,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                 entry = pte_wrprotect(entry);
                         if (!young)
                                 entry = pte_mkold(entry);
+                        /* NOTE: this may set soft-dirty too on some archs */
+                        if (dirty)
+                                entry = pte_mkdirty(entry);
                         if (soft_dirty)
                                 entry = pte_mksoft_dirty(entry);
                         if (uffd_wp)