Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/ioremap.c |  4 ++--
-rw-r--r--  arch/arm64/mm/mmu.c     | 12 ++++++++----
-rw-r--r--  arch/arm64/mm/pgd.c     | 18 ++++++++++++++++--
3 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c4..4a07630a6616 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache);
 
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 
 static inline pud_t * __init early_ioremap_pud(unsigned long addr)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6894ef3e6234..0bf90d26e745 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -297,11 +297,15 @@ static void __init map_mem(void)
 	 * create_mapping requires puds, pmds and ptes to be allocated from
 	 * memory addressable from the initial direct kernel mapping.
 	 *
-	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
-	 * aligned to 2MB as per Documentation/arm64/booting.txt).
+	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
+	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
+	 * PHYS_OFFSET (which must be aligned to 2MB as per
+	 * Documentation/arm64/booting.txt).
 	 */
-	limit = PHYS_OFFSET + PUD_SIZE;
+	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+		limit = PHYS_OFFSET + PMD_SIZE;
+	else
+		limit = PHYS_OFFSET + PUD_SIZE;
 	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260..6682b361d3ac 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+static struct kmem_cache *pgd_cache;
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	if (PGD_SIZE == PAGE_SIZE)
 		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		return kzalloc(PGD_SIZE, GFP_KERNEL);
+		return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	if (PGD_SIZE == PAGE_SIZE)
 		free_page((unsigned long)pgd);
 	else
-		kfree(pgd);
+		kmem_cache_free(pgd_cache, pgd);
+}
+
+static int __init pgd_cache_init(void)
+{
+	/*
+	 * Naturally aligned pgds required by the architecture.
+	 */
+	if (PGD_SIZE != PAGE_SIZE)
+		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+					      SLAB_PANIC, NULL);
+	return 0;
 }
+core_initcall(pgd_cache_init);
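
Note: the ioremap.c hunk is a pure type fix. bm_pmd and bm_pud hold PMD and PUD entries but were declared as pte_t arrays; with strict mm typechecking each table level is a distinct wrapper struct, so the wrong type is a compile-time error rather than a cosmetic slip. A reduced sketch of that wrapper-struct idea (illustrative types only, not the kernel's actual definitions):

    /*
     * Each table level wraps its entry word in its own struct, so the
     * compiler rejects accidental cross-level assignment.
     */
    typedef struct { unsigned long pte; } example_pte_t;
    typedef struct { unsigned long pmd; } example_pmd_t;

    static example_pte_t an_example_pte;
    static example_pmd_t an_example_pmd;

    /* an_example_pmd = an_example_pte;  <-- would not compile */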
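
Note: the mmu.c hunk selects the early memblock limit with IS_ENABLED() instead of #ifdef. IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, expands to a compile-time 1 (for =y or =m) or 0, so both branches remain visible to the compiler and the dead one is folded away. A minimal sketch of the idiom, with a hypothetical CONFIG symbol and helper name:

    #include <linux/kconfig.h>
    #include <linux/sizes.h>

    /*
     * IS_ENABLED() yields an integer constant expression, so an ordinary
     * C 'if' replaces the preprocessor conditional while keeping both
     * branches parsed and type-checked.
     */
    static inline unsigned long example_limit(unsigned long base)
    {
            if (IS_ENABLED(CONFIG_EXAMPLE_BIG_GRANULE))     /* hypothetical */
                    return base + SZ_512M;
            return base + SZ_1G;
    }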
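
Note: the pgd.c change exists because, at the time of this patch, kzalloc() guaranteed only the slab allocator's minimum alignment, while the architecture requires naturally aligned pgds when they come from the slab (PGD_SIZE != PAGE_SIZE). Passing the same value for size and align to kmem_cache_create() provides exactly that. The same pattern for a hypothetical cache (names and size illustrative, not from the patch):

    #include <linux/init.h>
    #include <linux/sizes.h>
    #include <linux/slab.h>

    static struct kmem_cache *example_cache;        /* hypothetical */

    static int __init example_cache_init(void)
    {
            /*
             * size == align gives natural alignment: every object starts
             * at a multiple of its own size. SLAB_PANIC turns a failed
             * cache creation into an immediate boot-time panic instead of
             * a NULL dereference much later.
             */
            example_cache = kmem_cache_create("example_cache", SZ_2K, SZ_2K,
                                              SLAB_PANIC, NULL);
            return 0;
    }
    core_initcall(example_cache_init);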