author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-12-13 01:55:52 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 21:42:48 +0100
commit		031bc5743f158b2d5498294f489e534a31251626 (patch)
tree		472c0024f26821a627ec17a2e73a450f450ca4cc /mm
parent		mm/debug-pagealloc: prepare boottime configurable on/off (diff)
mm/debug-pagealloc: make debug-pagealloc boottime configurable
Now that we have prepared debug-pagealloc to be avoided at boot time, introduce a new kernel parameter that controls whether debug-pagealloc is enabled at boot, and make the related functions do nothing when it is disabled.

The only non-intuitive part is the change to the guard page functions. Because guard pages are effective only when debug-pagealloc is enabled, it is reasonable to turn them off together with debug-pagealloc.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
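For reference, with this patch a CONFIG_DEBUG_PAGEALLOC kernel keeps the checks compiled in but leaves them inactive unless the new parameter is given on the boot command line; the parser added in mm/page_alloc.c below only recognizes the value "on":

	debug_pagealloc=on

Without this fragment on the command line, debug-pagealloc (and, with it, the guard pages and page poisoning paths) stays disabled at runtime even though CONFIG_DEBUG_PAGEALLOC=y.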
Diffstat (limited to 'mm')
-rw-r--r--	mm/debug-pagealloc.c	8
-rw-r--r--	mm/page_alloc.c	20
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 0072f2c53331..5bf5906ce13b 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -10,11 +10,17 @@ static bool page_poisoning_enabled __read_mostly;
static bool need_page_poisoning(void)
{
+ if (!debug_pagealloc_enabled())
+ return false;
+
return true;
}
static void init_page_poisoning(void)
{
+ if (!debug_pagealloc_enabled())
+ return;
+
page_poisoning_enabled = true;
}
@@ -119,7 +125,7 @@ static void unpoison_pages(struct page *page, int n)
unpoison_page(page + i);
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!page_poisoning_enabled)
return;
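The rename of kernel_map_pages() to __kernel_map_pages() only makes sense together with its header counterpart, which lies outside the mm/-limited diffstat shown here. A minimal sketch of what the include/linux/mm.h side could look like, assuming debug_pagealloc_enabled() simply reads _debug_pagealloc_enabled (the symbol names follow the patch; the exact header layout is an assumption):

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* skip the map/unmap (or poisoning) work unless "debug_pagealloc=on" was given */
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#else
static inline bool debug_pagealloc_enabled(void) { return false; }
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#endif

With this split, callers keep using kernel_map_pages() as before, and the runtime check is confined to the inline wrapper.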
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0a39d328ca1..303d38516807 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -425,15 +425,35 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
+bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;
+static int __init early_debug_pagealloc(char *buf)
+{
+ if (!buf)
+ return -EINVAL;
+
+ if (strcmp(buf, "on") == 0)
+ _debug_pagealloc_enabled = true;
+
+ return 0;
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
static bool need_debug_guardpage(void)
{
+ /* If we don't use debug_pagealloc, we don't need guard page */
+ if (!debug_pagealloc_enabled())
+ return false;
+
return true;
}
static void init_debug_guardpage(void)
{
+ if (!debug_pagealloc_enabled())
+ return;
+
_debug_guardpage_enabled = true;
}
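For context on the "non-intuitive" guard page change: once need_debug_guardpage() returns false, init_debug_guardpage() is never reached and _debug_guardpage_enabled stays false, so every guard page helper that gates on it becomes a no-op. A hedged sketch of how such a caller is expected to short-circuit (debug_guardpage_enabled() and set_page_guard() come from the parent patch; their exact bodies here are an assumption):

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline void set_page_guard(struct zone *zone, struct page *page,
				  unsigned int order, int migratetype)
{
	/*
	 * With debug_pagealloc off, _debug_guardpage_enabled was never set,
	 * so guard pages are disabled together with debug-pagealloc.
	 */
	if (!debug_guardpage_enabled())
		return;

	/* ... mark the page as a guard page ... */
}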