author     Johannes Weiner <hannes@cmpxchg.org>  2012-01-11 00:08:15 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 01:30:45 +0100
commit     799f933a82d878d7f15215473c5561ce984ada75 (patch)
tree       a096ac8de8da31385026c3d51eec292815438ac4 /mm/bootmem.c
parent     mm: bootmem: drop superfluous range check when freeing pages in bulk (diff)
mm: bootmem: try harder to free pages in bulk
The loop that frees pages to the page allocator while bootstrapping tries to free higher-order blocks only when the starting address is aligned to that block size. Otherwise it will free all pages on that node one-by-one.

Change it to free individual pages up to the first aligned block and then try higher-order frees from there.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
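For illustration only, here is a minimal user-space sketch of the traversal the message describes: free single pages until the cursor reaches a block boundary, free whole aligned blocks while they fit, then fall back to single pages for the tail. free_one_page(), free_block(), and BLOCK are hypothetical stand-ins for __free_pages_bootmem() and BITS_PER_LONG, and the reserved-page bitmap that the real free_all_bootmem_core() consults is left out.

#include <stdio.h>

/* stand-in for BITS_PER_LONG: the block size used for higher-order frees */
#define BLOCK	(8 * sizeof(unsigned long))

/* hypothetical stand-ins for __free_pages_bootmem(page, 0) and (page, order) */
static void free_one_page(unsigned long pfn)
{
	printf("order-0 free: pfn %lu\n", pfn);
}

static void free_block(unsigned long pfn)
{
	printf("block free:   pfn %lu (%zu pages)\n", pfn, (size_t)BLOCK);
}

/*
 * Walk [start, end) the way the patched loop does: single pages up to
 * the first BLOCK-aligned pfn, whole blocks while they fit, and single
 * pages again for the unaligned tail.
 */
static void free_range(unsigned long start, unsigned long end)
{
	while (start < end) {
		if (!(start % BLOCK) && start + BLOCK <= end) {
			free_block(start);
			start += BLOCK;
		} else {
			free_one_page(start);
			start++;
		}
	}
}

int main(void)
{
	/* an unaligned node, e.g. pfns 3..199: singles, whole blocks, singles */
	free_range(3, 200);
	return 0;
}

With an unaligned start such as pfn 3, the old loop would have taken the single-page path for the entire node; the sketch, like the patch, only does so up to the first aligned block.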
Diffstat (limited to 'mm/bootmem.c')
-rw-r--r--  mm/bootmem.c  22
1 file changed, 10 insertions, 12 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1aea171539ac..668e94df8cf2 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-	int aligned;
 	struct page *page;
 	unsigned long start, end, pages, count = 0;
 
@@ -181,14 +180,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 	start = bdata->node_min_pfn;
 	end = bdata->node_low_pfn;
 
-	/*
-	 * If the start is aligned to the machines wordsize, we might
-	 * be able to free pages in bulks of that order.
-	 */
-	aligned = !(start & (BITS_PER_LONG - 1));
-
-	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
-		bdata - bootmem_node_data, start, end, aligned);
+	bdebug("nid=%td start=%lx end=%lx\n",
+		bdata - bootmem_node_data, start, end);
 
 	while (start < end) {
 		unsigned long *map, idx, vec;
@@ -196,12 +189,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
 		vec = ~map[idx / BITS_PER_LONG];
-
-		if (aligned && vec == ~0UL) {
+		/*
+		 * If we have a properly aligned and fully unreserved
+		 * BITS_PER_LONG block of pages in front of us, free
+		 * it in one go.
+		 */
+		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
 			int order = ilog2(BITS_PER_LONG);
 
 			__free_pages_bootmem(pfn_to_page(start), order);
 			count += BITS_PER_LONG;
+			start += BITS_PER_LONG;
 		} else {
 			unsigned long off = 0;
 
@@ -214,8 +212,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 				vec >>= 1;
 				off++;
 			}
+			start = ALIGN(start + 1, BITS_PER_LONG);
 		}
-		start += BITS_PER_LONG;
 	}
 
 	page = virt_to_page(bdata->node_bootmem_map);
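For reference, the kernel's ALIGN(x, a) rounds x up to the next multiple of a (a power of two), so ALIGN(start + 1, BITS_PER_LONG) in the else branch always advances the cursor past the word just handled, whether or not start was already aligned. A quick user-space check of that round-up behaviour, using a local ALIGN_UP macro as a stand-in rather than the kernel header:

#include <stdio.h>

/* power-of-two round-up, mirroring the kernel's ALIGN() semantics */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* with BITS_PER_LONG == 64 */
	printf("%lu\n", ALIGN_UP(70UL + 1, 64));	/* 128: unaligned cursor */
	printf("%lu\n", ALIGN_UP(64UL + 1, 64));	/* 128: aligned cursor */
	return 0;
}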