author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2017-03-09 15:24:08 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>         2017-03-09 20:48:48 +0100
commit     90eceff1a375f6ffa78caf8654e787c0a8a591ef
tree       a3fa46c32dc6b45dc99559eafa937ff4c4d2f1d7
parent     mm: convert generic code to 5-level paging
mm: introduce __p4d_alloc()
For full 5-level paging we need a helper to allocate a p4d page table.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 23
1 file changed, 23 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 7f1c2163b3ce..235ba51b2fbf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3906,6 +3906,29 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	p4d_t *new = p4d_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+	if (pgd_present(*pgd))		/* Another has populated it */
+		p4d_free(mm, new);
+	else
+		pgd_populate(mm, pgd, new);
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
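
The in-code comment ("We've already handled the fast-path in-line") refers to an inline wrapper that only drops into __p4d_alloc() when the pgd entry is still empty. Below is a minimal sketch of what that wrapper looks like, assuming it follows the same shape as the neighbouring pud_alloc()/pmd_alloc() wrappers; the actual p4d_alloc() definition comes from the parent patch ("mm: convert generic code to 5-level paging"), not from this one.

/*
 * Sketch only -- assumes the pud_alloc()-style wrapper pattern.
 * Fast path: if the pgd entry is already populated, return the p4d
 * entry for this address; otherwise fall back to the __p4d_alloc()
 * slow path added above.
 */
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

In the slow path, pgd_present() is re-checked under mm->page_table_lock so that if two threads race to populate the same pgd entry, the loser simply frees its duplicate table; the smp_wmb() orders the initialisation of the new table before the pgd_populate() store that makes it visible to other walkers (see the matching comment in __pte_alloc()).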