author     Hugh Dickins <hughd@google.com>                2015-11-06 03:49:53 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-11-06 04:34:48 +0100
commit     5c3f9a67371643b6faa987622bc1b67667bab848 (patch)
tree       6c29472764822d11cd53509a7bc93aa3f6803e1c /mm/migrate.c
parent     mm: page migration trylock newpage at same level as oldpage (diff)
mm: page migration remove_migration_ptes at lock+unlock level
Clean up page migration a little more by calling remove_migration_ptes()
from the same level, on success or on failure, from __unmap_and_move()
or from unmap_and_move_huge_page().

Don't reset page->mapping of a PageAnon old page in move_to_new_page(),
leave that to when the page is freed.  Except for here in page migration,
it has been an invariant that a PageAnon (bit set in page->mapping) page
stays PageAnon until it is freed, and I think we're safer to keep to that.

And with the above rearrangement, it's necessary because zap_pte_range()
wants to identify whether a migration entry represents a file or an anon
page, to update the appropriate rss stats without waiting on it.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
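
The last paragraph is the constraint that matters: a concurrent unmapper may
find only a migration swap entry in the pte, so it has to recover the old page
from that entry and test PageAnon() on it to pick the right rss counter.  A
minimal sketch of that lookup pattern follows; the helper name and shape are
illustrative only, not the upstream zap_pte_range() code:

	#include <linux/mm.h>
	#include <linux/swap.h>
	#include <linux/swapops.h>

	/*
	 * Illustrative sketch (made-up helper): given a non-present pte
	 * holding a migration entry, charge the right rss counter without
	 * waiting for migration to complete.  This only works if the old
	 * page keeps its PageAnon marking until it is actually freed,
	 * which is the invariant this patch preserves.
	 */
	static void rss_account_migration_entry(pte_t ptent, int *rss)
	{
		swp_entry_t entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry))
			return;

		if (PageAnon(migration_entry_to_page(entry)))
			rss[MM_ANONPAGES]--;
		else
			rss[MM_FILEPAGES]--;
	}

Had move_to_new_page() kept clearing page->mapping of an anon old page on
success, a PageAnon() test like the one above could misclassify the entry as
a file page.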
Diffstat (limited to '')
-rw-r--r--	mm/migrate.c | 34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 6d7774ef0e6c..7b44ebdf2d26 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -722,7 +722,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  MIGRATEPAGE_SUCCESS - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-				int page_was_mapped, enum migrate_mode mode)
+				enum migrate_mode mode)
 {
 	struct address_space *mapping;
 	int rc;
@@ -755,19 +755,21 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 * space which also has its own migratepage callback. This
 		 * is the most common path for page migration.
 		 */
-		rc = mapping->a_ops->migratepage(mapping,
-						newpage, page, mode);
+		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
 	else
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 
-	if (rc != MIGRATEPAGE_SUCCESS) {
+	/*
+	 * When successful, old pagecache page->mapping must be cleared before
+	 * page is freed; but stats require that PageAnon be left as PageAnon.
+	 */
+	if (rc == MIGRATEPAGE_SUCCESS) {
+		set_page_memcg(page, NULL);
+		if (!PageAnon(page))
+			page->mapping = NULL;
+	} else {
 		set_page_memcg(newpage, NULL);
 		newpage->mapping = NULL;
-	} else {
-		set_page_memcg(page, NULL);
-		if (page_was_mapped)
-			remove_migration_ptes(page, newpage);
-		page->mapping = NULL;
 	}
 	return rc;
 }
@@ -902,10 +904,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, page_was_mapped, mode);
+		rc = move_to_new_page(newpage, page, mode);
 
-	if (rc && page_was_mapped)
-		remove_migration_ptes(page, page);
+	if (page_was_mapped)
+		remove_migration_ptes(page,
+			rc == MIGRATEPAGE_SUCCESS ? newpage : page);
 
 out_unlock_both:
 	unlock_page(newpage);
@@ -1066,10 +1069,11 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	}
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
+		rc = move_to_new_page(new_hpage, hpage, mode);
 
-	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
-		remove_migration_ptes(hpage, hpage);
+	if (page_was_mapped)
+		remove_migration_ptes(hpage,
+			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
 
 	unlock_page(new_hpage);