author      Arnd Bergmann <arnd@arndb.de>   2018-03-09 23:14:56 +0100
committer   Arnd Bergmann <arnd@arndb.de>   2018-03-16 10:56:13 +0100
commit      79375ea3ec527f746d5beae8c8f6e8a58740d4a8 (patch)
tree        68862deeecaeb2c631195ee7d4c523a58272f1bf /mm
parent      mm: remove blackfin MPU support (diff)
download    linux-79375ea3ec527f746d5beae8c8f6e8a58740d4a8.tar.xz
            linux-79375ea3ec527f746d5beae8c8f6e8a58740d4a8.zip
mm: remove obsolete alloc_remap()
Tile was the only remaining architecture to implement alloc_remap(), and
since that is being removed, there is no point in keeping this function.
Removing all callers simplifies the mem_map handling.

Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
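For context, on every architecture except tile, alloc_remap() was only a
generic stub that returned NULL, so the callers changed below always fell
through to the memblock path anyway. A minimal sketch of that stub, from
memory of include/linux/bootmem.h (the header change is outside the 'mm'
diffstat shown here):

/*
 * Sketch of the generic fallback: without an arch-provided
 * implementation (only tile had one), alloc_remap() can never
 * succeed, which is why deleting the callers loses nothing.
 */
static inline void *alloc_remap(int nid, unsigned long size)
{
	return NULL;
}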
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c    5
-rw-r--r--  mm/sparse.c       15
2 files changed, 1 insertion(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb416723538f..484e21062228 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6199,10 +6199,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
- map = alloc_remap(pgdat->node_id, size);
- if (!map)
- map = memblock_virt_alloc_node_nopanic(size,
- pgdat->node_id);
+ map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
}
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
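With the alloc_remap() attempt gone, the node's struct page array is obtained
in a single step. A condensed sketch of the resulting allocation, reusing only
the identifiers visible in the hunk above (memblock_virt_alloc_node_nopanic()
hands back zeroed boot memory, preferring the given node, and returns NULL
instead of panicking if the allocation fails):

	/* One memblock allocation for the node's mem_map; no
	 * architecture-specific remap attempt is made first. */
	size = (end - start) * sizeof(struct page);
	map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
	pgdat->node_mem_map = map + offset;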
diff --git a/mm/sparse.c b/mm/sparse.c
index 7af5e7a92528..65bb52599f90 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -427,10 +427,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
struct page *map;
unsigned long size;
- map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
- if (map)
- return map;
-
size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
map = memblock_virt_alloc_try_nid(size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -446,17 +442,6 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
unsigned long pnum;
unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
- map = alloc_remap(nodeid, size * map_count);
- if (map) {
- for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
- if (!present_section_nr(pnum))
- continue;
- map_map[pnum] = map;
- map += size;
- }
- return;
- }
-
size = PAGE_ALIGN(size);
map = memblock_virt_alloc_try_nid_raw(size * map_count,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
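The block removed from sparse_mem_maps_populate_node() was essentially a copy
of the per-section slicing that the surviving memblock path performs right
after the allocation shown above (that remainder is elided from this hunk). A
hedged sketch of that surviving logic, reconstructed from the surrounding
function rather than taken from the diff:

	/* Hand out one PAGE_ALIGNed slice of the bulk allocation to each
	 * present section, i.e. the same loop the alloc_remap() branch
	 * duplicated before this patch. */
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}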