path: root/mm/page_alloc.c
author     David Rientjes <rientjes@google.com>  2017-02-23 00:46:28 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 01:41:30 +0100
commit     685dbf6f5a643c4bdb9323ee3544ec652505d2ea (patch)
tree       2d4d5853126589958b4632b71041822478b9ab4f  /mm/page_alloc.c
parent     mm: help __GFP_NOFAIL allocations which do not trigger OOM killer (diff)
download   linux-685dbf6f5a643c4bdb9323ee3544ec652505d2ea.tar.xz
           linux-685dbf6f5a643c4bdb9323ee3544ec652505d2ea.zip
mm, page_alloc: warn_alloc nodemask is NULL when cpusets are disabled
The patch "mm, page_alloc: warn_alloc print nodemask" implicitly sets the allocation nodemask to cpuset_current_mems_allowed when there is no effective mempolicy. cpuset_current_mems_allowed is only effective when cpusets are enabled, which is also printed by warn_alloc(), so setting the nodemask to cpuset_current_mems_allowed is redundant and prevents debugging issues where ac->nodemask is not set properly in the page allocator. This provides better debugging output since cpuset_print_current_mems_allowed() is already provided. Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1701181347320.142399@chino.kir.corp.google.com Signed-off-by: David Rientjes <rientjes@google.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  10
1 file changed, 7 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a179607de26f..c21b33668133 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3034,7 +3034,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
va_list args;
static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- nodemask_t *nm = (nodemask) ? nodemask : &cpuset_current_mems_allowed;

if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
debug_guardpage_minorder() > 0)
@@ -3048,11 +3047,16 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
pr_cont("%pV", &vaf);
va_end(args);

- pr_cont(", mode:%#x(%pGg), nodemask=%*pbl\n", gfp_mask, &gfp_mask, nodemask_pr_args(nm));
+ pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
+ if (nodemask)
+ pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
+ else
+ pr_cont("(null)\n");
+
cpuset_print_current_mems_allowed();
dump_stack();
- warn_alloc_show_mem(gfp_mask, nm);
+ warn_alloc_show_mem(gfp_mask, nodemask);
}

static inline struct page *