summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hughd@google.com>2015-11-06 03:49:37 +0100
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-06 04:34:48 +0100
commit51afb12ba809db664682a31154c11e720e2c363c (patch)
tree6bf2b9e336698b4259a0ba8a4cff964035e70fc3 /mm
parentmm: rmap use pte lock not mmap_sem to set PageMlocked (diff)
downloadlinux-51afb12ba809db664682a31154c11e720e2c363c.tar.xz
linux-51afb12ba809db664682a31154c11e720e2c363c.zip
mm: page migration fix PageMlocked on migrated pages
Commit e6c509f85455 ("mm: use clear_page_mlock() in page_remove_rmap()") in v3.7 inadvertently made mlock_migrate_page() impotent: page migration unmaps the page from userspace before migrating, and that commit clears PageMlocked on the final unmap, leaving mlock_migrate_page() with nothing to do. Not a serious bug, the next attempt at reclaiming the page would fix it up; but a betrayal of page migration's intent - the new page ought to emerge as PageMlocked. I don't see how to fix it for mlock_migrate_page() itself; but easily fixed in remove_migration_pte(), by calling mlock_vma_page() when the vma is VM_LOCKED - under pte lock as in try_to_unmap_one(). Delete mlock_migrate_page()? Not quite, it does still serve a purpose for migrate_misplaced_transhuge_page(): where we could replace it by a test, clear_page_mlock(), mlock_vma_page() sequence; but would that be an improvement? mlock_migrate_page() is fairly lean, and let's make it leaner by skipping the irq save/restore now clearly not needed. Signed-off-by: Hugh Dickins <hughd@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Rik van Riel <riel@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/internal.h9
-rw-r--r--mm/migrate.c6
2 files changed, 8 insertions, 7 deletions
diff --git a/mm/internal.h b/mm/internal.h
index bc0fa9a69e46..d4b807d6c963 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -271,20 +271,19 @@ extern unsigned int munlock_vma_page(struct page *page);
extern void clear_page_mlock(struct page *page);
/*
- * mlock_migrate_page - called only from migrate_page_copy() to
- * migrate the Mlocked page flag; update statistics.
+ * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
+ * (because that does not go through the full procedure of migration ptes):
+ * to migrate the Mlocked page flag; update statistics.
*/
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
if (TestClearPageMlocked(page)) {
- unsigned long flags;
int nr_pages = hpage_nr_pages(page);
- local_irq_save(flags);
+ /* Holding pmd lock, no change in irq context: __mod is safe */
__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
SetPageMlocked(newpage);
__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
- local_irq_restore(flags);
}
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 94961f4654b7..ed72c499df8a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -171,6 +171,9 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
else
page_add_file_rmap(new);
+ if (vma->vm_flags & VM_LOCKED)
+ mlock_vma_page(new);
+
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, ptep);
unlock:
@@ -537,7 +540,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
cpupid = page_cpupid_xchg_last(page, -1);
page_cpupid_xchg_last(newpage, cpupid);
- mlock_migrate_page(newpage, page);
ksm_migrate_page(newpage, page);
/*
* Please do not reorder this without considering how mm/ksm.c's
@@ -1787,7 +1789,6 @@ fail_putback:
SetPageActive(page);
if (TestClearPageUnevictable(new_page))
SetPageUnevictable(page);
- mlock_migrate_page(page, new_page);
unlock_page(new_page);
put_page(new_page); /* Free it */
@@ -1829,6 +1830,7 @@ fail_putback:
goto fail_putback;
}
+ mlock_migrate_page(new_page, page);
mem_cgroup_migrate(page, new_page, false);
page_remove_rmap(page);