author     Jan Kara <jack@suse.cz>                            2018-12-28 09:39:01 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>     2018-12-28 21:11:51 +0100
commit     0b3901b38d9d916f634e903ce7cd2a8ddd5b1559 (patch)
tree       08163d6373d56829d80a1ea60651017d041698d0 /mm
parent     mm, page_alloc: enable pcpu_drain with zone capability (diff)
mm: migration: factor out code to compute expected number of page references
Patch series "mm: migrate: Fix page migration stalls for blkdev pages".

This patchset deals with page migration stalls that were reported by our customer, caused by a block device page whose buffer head was in the bh LRU cache. The patchset modifies the page migration code so that buffer heads are handled completely inside buffer_migrate_page(), and then provides a new migration helper for pages with buffer heads that is safe to use even for block device pages and that also deals with the bh LRUs.

This patch (of 6):

Factor out a function to compute the number of expected page references in migrate_page_move_mapping(). Note that this moves the hpage_nr_pages() and page_has_private() checks out from under xas_lock_irq(); this is safe, however, since we hold the page lock.

[jack@suse.cz: fix expected_page_refs()]
Link: http://lkml.kernel.org/r/20181217131710.GB8611@quack2.suse.cz
Link: http://lkml.kernel.org/r/20181211172143.7358-2-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
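To make the reference arithmetic concrete before reading the diff: for an order-0 page-cache page with buffer heads attached, hpage_nr_pages() is 1 and page_has_private() is 1, so the new helper returns 3 (the caller's isolation reference, the page cache's reference, and the reference pinned by the buffers). Below is a minimal sketch of how such a helper is consumed; only expected_page_refs() comes from the patch, the wrapper and its name are hypothetical:

        /*
         * Hypothetical wrapper; expected_page_refs() is the helper added
         * by this patch, everything else here is illustrative.
         */
        static bool page_refs_as_expected(struct page *page, int extra_count)
        {
                /*
                 * Safe to compute without xas_lock_irq(): the caller holds
                 * the page lock, so page_mapping() and page_has_private()
                 * cannot change under us.
                 */
                int expected = expected_page_refs(page) + extra_count;

                /* Order-0 page-cache page with buffers: 1 + 1 + 1 == 3 */
                return page_count(page) == expected;
        }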
Diffstat (limited to 'mm')
-rw-r--r--  mm/migrate.c | 27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 462163f5f278..94c9ebf1f33e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -424,6 +424,22 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
}
 #endif /* CONFIG_BLOCK */

+static int expected_page_refs(struct page *page)
+{
+        int expected_count = 1;
+
+        /*
+         * Device public or private pages have an extra refcount as they are
+         * ZONE_DEVICE pages.
+         */
+        expected_count += is_device_private_page(page);
+        expected_count += is_device_public_page(page);
+        if (page_mapping(page))
+                expected_count += hpage_nr_pages(page) + page_has_private(page);
+
+        return expected_count;
+}
+
 /*
  * Replace the page in the mapping.
  *
@@ -440,14 +456,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         XA_STATE(xas, &mapping->i_pages, page_index(page));
         struct zone *oldzone, *newzone;
         int dirty;
-        int expected_count = 1 + extra_count;
-
-        /*
-         * Device public or private pages have an extra refcount as they are
-         * ZONE_DEVICE pages.
-         */
-        expected_count += is_device_private_page(page);
-        expected_count += is_device_public_page(page);
+        int expected_count = expected_page_refs(page) + extra_count;

         if (!mapping) {
                 /* Anonymous page without mapping */
@@ -467,8 +476,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
         newzone = page_zone(newpage);

         xas_lock_irq(&xas);
-
-        expected_count += hpage_nr_pages(page) + page_has_private(page);
         if (page_count(page) != expected_count || xas_load(&xas) != page) {
                 xas_unlock_irq(&xas);
                 return -EAGAIN;