author | Johannes Weiner <hannes@cmpxchg.org> | 2012-01-11 00:08:10 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-11 01:30:44 +0100 |
commit | c3993076f842de3754360e5b998d6657a9d30303 (patch) | |
tree | 78c1ca3d031483932e2f236706b20064742c0b0c /mm | |
parent | tracepoint: add tracepoints for debugging oom_score_adj (diff) | |
mm: page_alloc: generalize order handling in __free_pages_bootmem()
__free_pages_bootmem() used to special-case higher-order frees to save individual page checking by using free_pages_bulk().
Nowadays, both zero-order and non-zero-order frees use free_pages(), which
checks each individual page anyway, so there is little point in making
the distinction anymore. The higher-order loop works just fine for
zero-order pages.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 33 |
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59153da58c69..794e6715c226 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -730,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-	if (order == 0) {
-		__ClearPageReserved(page);
-		set_page_count(page, 0);
-		set_page_refcounted(page);
-		__free_page(page);
-	} else {
-		int loop;
-
-		prefetchw(page);
-		for (loop = 0; loop < (1 << order); loop++) {
-			struct page *p = &page[loop];
+	unsigned int nr_pages = 1 << order;
+	unsigned int loop;
 
-			if (loop + 1 < (1 << order))
-				prefetchw(p + 1);
-			__ClearPageReserved(p);
-			set_page_count(p, 0);
-		}
+	prefetchw(page);
+	for (loop = 0; loop < nr_pages; loop++) {
+		struct page *p = &page[loop];
 
-		set_page_refcounted(page);
-		__free_pages(page, order);
+		if (loop + 1 < nr_pages)
+			prefetchw(p + 1);
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
 	}
+
+	set_page_refcounted(page);
+	__free_pages(page, order);
 }
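For readability, here is the generalized __free_pages_bootmem() as it reads after the patch, assembled from the diff above (it is kernel-internal code, not a standalone runnable example). The claim in the commit message holds by inspection: with order == 0, nr_pages is 1 << 0 == 1, so the loop body runs exactly once, the next-page prefetch is skipped, and the single page gets the same __ClearPageReserved()/set_page_count() treatment as each page of a higher-order block.

```c
/* mm/page_alloc.c after this patch: one loop handles every order, including 0 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;	/* == 1 for order-0 frees */
	unsigned int loop;

	prefetchw(page);
	for (loop = 0; loop < nr_pages; loop++) {
		struct page *p = &page[loop];

		if (loop + 1 < nr_pages)	/* don't prefetch past the last page */
			prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	set_page_refcounted(page);
	__free_pages(page, order);
}
```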