author     Joonsoo Kim <iamjoonsoo.kim@lge.com>  2016-07-27 00:23:43 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-27 01:19:19 +0200
commit     83358ece26b70f20c0ba2e0e00dc84b0ee24fe6d (patch)
tree       17c1a4620b524345a45d90239413e5e87d3e7b5e
parent     mm/compaction: split freepages without holding the zone lock (diff)
mm/page_owner: initialize page owner without holding the zone lock
It's not necessary to initialize page_owner while holding the zone lock;
doing so only adds contention on the lock. That is not a big problem since
page_owner is just a debugging feature, but avoiding it is still an
improvement.

This is also a preparation step for using stackdepot in the page owner
feature. Stackdepot allocates new pages when it runs out of reserved space,
and holding the zone lock at that point would cause a deadlock.

Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
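The change below is the usual critical-section-narrowing pattern: keep only
the freelist manipulation under zone->lock and do the debug bookkeeping
afterwards. A minimal sketch of the before/after shape, assuming a
hypothetical helper isolate_one_free_page() standing in for the freelist
work; spin_lock_irqsave()/spin_unlock_irqrestore() and set_page_owner() are
the real kernel APIs touched by the patch:

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/page_owner.h>

/* Before: set_page_owner() runs with zone->lock held. Once page_owner
 * is backed by stackdepot, set_page_owner() may itself allocate pages,
 * re-entering the allocator while the lock is held -- a deadlock. */
static struct page *isolate_free_page_locked(struct zone *zone, unsigned int order)
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	page = isolate_one_free_page(zone, order);	/* hypothetical helper */
	set_page_owner(page, order, __GFP_MOVABLE);	/* may allocate! */
	spin_unlock_irqrestore(&zone->lock, flags);
	return page;
}

/* After: only the freelist manipulation holds the lock; page_owner
 * initialization moves outside the critical section, which both shortens
 * the lock hold time and makes allocation inside set_page_owner() safe. */
static struct page *isolate_free_page_unlocked(struct zone *zone, unsigned int order)
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	page = isolate_one_free_page(zone, order);	/* hypothetical helper */
	spin_unlock_irqrestore(&zone->lock, flags);

	set_page_owner(page, order, __GFP_MOVABLE);
	return page;
}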
-rw-r--r--  mm/compaction.c      3
-rw-r--r--  mm/page_alloc.c      2
-rw-r--r--  mm/page_isolation.c  9
3 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 3cda95451d93..4ae1294068a8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -79,6 +80,8 @@ static void map_pages(struct list_head *list)
 		arch_alloc_page(page, order);
 		kernel_map_pages(page, nr_pages, 1);
 		kasan_alloc_pages(page, order);
+
+		set_page_owner(page, order, __GFP_MOVABLE);
 
 		if (order)
 			split_page(page, order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44cee1e1d65b..f07552fc43e1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2509,8 +2509,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 612122bf6a42..927f5ee24c87 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -7,6 +7,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -108,8 +109,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 			if (pfn_valid_within(page_to_pfn(buddy)) &&
 			    !is_migrate_isolate_page(buddy)) {
 				__isolate_free_page(page, order);
-				kernel_map_pages(page, (1 << order), 1);
-				set_page_refcounted(page);
 				isolated_page = page;
 			}
 		}
@@ -128,8 +127,12 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		kernel_map_pages(page, (1 << order), 1);
+		set_page_refcounted(page);
+		set_page_owner(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
+	}
 }
 
 static inline struct page *