path: root/arch/x86/mm/init.c
author		Yinghai Lu <yinghai@kernel.org>	2012-11-17 04:39:04 +0100
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 20:59:27 +0100
commit		22c8ca2ac256bb681be791858b35502b5d37e73b (patch)
tree		7eab01f0ad05e228f9d9c01da85503e6fafdb4f2 /arch/x86/mm/init.c
parent		x86, mm, Xen: Remove mapping_pagetable_reserve() (diff)
x86, mm: Add alloc_low_pages(num)
32bit kmap mapping needs pages to be used from low to high. At this point those pages still come from pgt_buf_* in the BRK area, so that is fine for now.

But we want to move early_ioremap_page_table_range_init() out of init_memory_mapping() and only call it once, later; that will make page_table_range_init()/page_table_kmap_check()/alloc_low_page() use memblock to get pages. memblock allocates pages from high to low, so page_table_kmap_check(), which has a BUG_ON that checks the ordering, would panic.

This patch adds alloc_low_pages() to make it possible to allocate several pages up front and hand them out one by one, from low to high.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-28-git-send-email-yinghai@kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
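[Editorial note, not part of the patch: a minimal sketch of the calling pattern the new helper enables. The page count of 4 and the loop body are hypothetical; the point is that the pages come back contiguous and are consumed in ascending address order, the ordering that page_table_kmap_check()'s BUG_ON expects.]

	/* Hypothetical caller: reserve several page-table pages up front,
	 * then hand them out one by one from low to high. */
	void *base = alloc_low_pages(4);
	int i;

	for (i = 0; i < 4; i++) {
		void *page = base + i * PAGE_SIZE;	/* strictly ascending */
		/* ... install "page" as the next page-table page ... */
	}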
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c | 33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 21173fcdb4a1..02cea14c6d0c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -25,36 +25,45 @@ unsigned long __meminitdata pgt_buf_top;
static unsigned long min_pfn_mapped;
-__ref void *alloc_low_page(void)
+__ref void *alloc_low_pages(unsigned int num)
{
unsigned long pfn;
- void *adr;
+ int i;
#ifdef CONFIG_X86_64
if (after_bootmem) {
- adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+ unsigned int order;
- return adr;
+ order = get_order((unsigned long)num << PAGE_SHIFT);
+ return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
+ __GFP_ZERO, order);
}
#endif
- if ((pgt_buf_end + 1) >= pgt_buf_top) {
+ if ((pgt_buf_end + num) >= pgt_buf_top) {
unsigned long ret;
if (min_pfn_mapped >= max_pfn_mapped)
panic("alloc_low_page: ran out of memory");
ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
max_pfn_mapped << PAGE_SHIFT,
- PAGE_SIZE, PAGE_SIZE);
+ PAGE_SIZE * num, PAGE_SIZE);
if (!ret)
panic("alloc_low_page: can not alloc memory");
- memblock_reserve(ret, PAGE_SIZE);
+ memblock_reserve(ret, PAGE_SIZE * num);
pfn = ret >> PAGE_SHIFT;
- } else
- pfn = pgt_buf_end++;
+ } else {
+ pfn = pgt_buf_end;
+ pgt_buf_end += num;
+ }
+
+ for (i = 0; i < num; i++) {
+ void *adr;
+
+ adr = __va((pfn + i) << PAGE_SHIFT);
+ clear_page(adr);
+ }
- adr = __va(pfn * PAGE_SIZE);
- clear_page(adr);
- return adr;
+ return __va(pfn << PAGE_SHIFT);
}
/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
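[Editorial note, not part of the patch: the after_bootmem path above allocates in power-of-two orders, since __get_free_pages() takes an order, so get_order() rounds the request up. A worked example, assuming the usual 4 KiB PAGE_SIZE:]

	/* num = 3  ->  size = 3 << PAGE_SHIFT = 12 KiB
	 * get_order(12 KiB) = 2, so __get_free_pages() returns 2^2 = 4 pages;
	 * one page is over-allocated. For num = 4 (16 KiB) the fit is exact. */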