author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-24 02:50:35 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-24 02:50:35 +0100
commit  5ce1a70e2f00f0bce0cab57f798ca354b9496169 (patch)
tree    6e80200536b7a3576fd71ff2c7135ffe87dc858e /mm/memblock.c
parent  Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/pow... (diff)
parent  ksm: allocate roots when needed (diff)
Merge branch 'akpm' (more incoming from Andrew)
Merge second patch-bomb from Andrew Morton:

 - A little DM fix

 - the MM queue

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (154 commits)
  ksm: allocate roots when needed
  mm: cleanup "swapcache" in do_swap_page
  mm,ksm: swapoff might need to copy
  mm,ksm: FOLL_MIGRATION do migration_entry_wait
  ksm: shrink 32-bit rmap_item back to 32 bytes
  ksm: treat unstable nid like in stable tree
  ksm: add some comments
  tmpfs: fix mempolicy object leaks
  tmpfs: fix use-after-free of mempolicy object
  mm/fadvise.c: drain all pagevecs if POSIX_FADV_DONTNEED fails to discard all pages
  mm: export mmu notifier invalidates
  mm: accelerate mm_populate() treatment of THP pages
  mm: use long type for page counts in mm_populate() and get_user_pages()
  mm: accurately document nr_free_*_pages functions with code comments
  HWPOISON: change order of error_states[]'s elements
  HWPOISON: fix misjudgement of page_action() for errors on mlocked pages
  memcg: stop warning on memcg_propagate_kmem
  net: change type of virtio_chan->p9_max_pages
  vmscan: change type of vm_total_pages to unsigned long
  fs/nfsd: change type of max_delegations, nfsd_drc_max_mem and nfsd_drc_mem_used
  ...
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  |  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index b8d9147e5c08..1bcd9b970564 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,9 +92,58 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
*
* Find @size free area aligned to @align in the specified range and node.
*
+ * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check that
+ * the memory we found is not in hotpluggable ranges.
+ *
* RETURNS:
* Found address on success, %0 on failure.
*/
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ phys_addr_t this_start, this_end, cand;
+ u64 i;
+ int curr = movablemem_map.nr_map - 1;
+
+ /* pump up @end */
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+ end = memblock.current_limit;
+
+ /* avoid allocating the first page */
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
+ end = max(start, end);
+
+ for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
+
+restart:
+ if (this_end <= this_start || this_end < size)
+ continue;
+
+ for (; curr >= 0; curr--) {
+ if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT)
+ < this_end)
+ break;
+ }
+
+ cand = round_down(this_end - size, align);
+ if (curr >= 0 &&
+ cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) {
+ this_end = movablemem_map.map[curr].start_pfn
+ << PAGE_SHIFT;
+ goto restart;
+ }
+
+ if (cand >= this_start)
+ return cand;
+ }
+
+ return 0;
+}
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align, int nid)
@@ -123,6 +172,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
}
return 0;
}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* memblock_find_in_range - find free area in given range
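
For readers skimming the hunk above, here is a minimal, self-contained C sketch of the search pattern the new CONFIG_HAVE_MEMBLOCK_NODE_MAP branch implements: scan the free ranges from the top down, and whenever the aligned candidate would fall inside a hotpluggable (movable) range, clip the search window to just below that range and retry, as the goto-restart loop does. It is illustrative only: free_ranges, movable, and find_free_area are invented stand-ins for memblock's free-region iterator and movablemem_map, not kernel APIs.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Round down to a power-of-two alignment, as memblock does. */
#define ALIGN_DOWN(x, a) ((x) & ~((phys_addr_t)(a) - 1))

struct range {
	phys_addr_t start, end;		/* [start, end) */
};

/* Hypothetical stand-ins for memblock's free list and movablemem_map. */
static struct range free_ranges[] = {
	{ 0x00100000, 0x10000000 },
	{ 0x20000000, 0x40000000 },
};
static struct range movable[] = {
	{ 0x30000000, 0x40000000 },	/* declared hotpluggable */
};

/*
 * Top-down search for @size bytes aligned to @align: take the highest
 * candidate, and if it overlaps a movable range, clip the window to the
 * bottom of that range and retry, mirroring the goto-restart loop above.
 */
static phys_addr_t find_free_area(phys_addr_t size, phys_addr_t align)
{
	int nfree = sizeof(free_ranges) / sizeof(free_ranges[0]);
	int nmov = sizeof(movable) / sizeof(movable[0]);

	for (int i = nfree - 1; i >= 0; i--) {
		phys_addr_t this_start = free_ranges[i].start;
		phys_addr_t this_end = free_ranges[i].end;

		while (this_end > this_start && this_end - this_start >= size) {
			phys_addr_t cand = ALIGN_DOWN(this_end - size, align);
			int clipped = 0;

			for (int m = nmov - 1; m >= 0; m--) {
				if (cand < movable[m].end &&
				    movable[m].start < this_end) {
					this_end = movable[m].start;
					clipped = 1;
					break;
				}
			}
			if (clipped)
				continue;	/* retry below the movable range */

			if (cand >= this_start)
				return cand;
			break;
		}
	}
	return 0;	/* memblock's convention: 0 means failure */
}

int main(void)
{
	/* The top of the highest free range is movable, so the result
	   lands just below 0x30000000: expect 0x2ff00000. */
	printf("found: %#llx\n",
	       (unsigned long long)find_free_area(0x100000, 0x1000));
	return 0;
}

The key design point the sketch preserves is that the search is top-down (so ordinary allocations stay in high memory by default) while hotpluggable regions are stepped over rather than excluded up front, which keeps them free for later use as ZONE_MOVABLE.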