author     Mike Rapoport <rppt@linux.ibm.com>              2019-03-06 00:46:43 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 06:07:18 +0100
commit     23a7052a5db478bdacf45bea55e6f50171f5eede (patch)
tree       19a6113acea0e6a287e4653f365d8b5ff2780df6 /mm/page_alloc.c
parent     arch/powerpc/mm/hugetlb: NestMMU workaround for hugetlb mprotect RW upgrade (diff)
download   linux-23a7052a5db478bdacf45bea55e6f50171f5eede.tar.xz
           linux-23a7052a5db478bdacf45bea55e6f50171f5eede.zip
mm/page_alloc.c: check return value of memblock_alloc_node_nopanic()
There are two early memory allocations that use memblock_alloc_node_nopanic() and do not check its return value. While this happens very early during boot and the chances that the allocation will fail are slim, it is still worth having proper checks for allocation errors.

Link: http://lkml.kernel.org/r/1547734941-944-1-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
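For readers unfamiliar with the memblock API, the sketch below illustrates the pattern this patch applies: memblock_alloc_node_nopanic() returns NULL on failure rather than panicking, so an early-boot caller that cannot recover must check the result and call panic() itself. This is a minimal, hypothetical example; example_alloc_per_node() and its parameters are made up for illustration, and only memblock_alloc_node_nopanic(), panic(), and __init are real kernel interfaces from the v5.0-era API touched by this patch.

#include <linux/init.h>		/* __init */
#include <linux/kernel.h>	/* panic() */
#include <linux/memblock.h>	/* memblock_alloc_node_nopanic() */

/*
 * Hypothetical boot-time helper showing the check-and-panic pattern
 * this patch adds: the "_nopanic" allocator reports failure by
 * returning NULL, and the caller decides how to handle it.
 */
static void __init example_alloc_per_node(int nid, unsigned long size)
{
	void *buf;

	buf = memblock_alloc_node_nopanic(size, nid);
	if (!buf)
		panic("Failed to allocate %lu bytes on node %d\n",
		      size, nid);

	/* ... initialize and use buf ... */
}

There is no error path to fall back on this early in boot, which is why both callers in the patch treat a failed allocation as fatal.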
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ec250453f5e8..11a5f50efd97 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6431,10 +6431,14 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
 {
 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
-	if (usemapsize)
+	if (usemapsize) {
 		zone->pageblock_flags =
 			memblock_alloc_node_nopanic(usemapsize,
 						    pgdat->node_id);
+		if (!zone->pageblock_flags)
+			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
+			      usemapsize, zone->name, pgdat->node_id);
+	}
 }
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
@@ -6664,6 +6668,9 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
 		map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+		if (!map)
+			panic("Failed to allocate %ld bytes for node %d memory map\n",
+			      size, pgdat->node_id);
 		pgdat->node_mem_map = map + offset;
 	}
 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",