From 2dde02ab6d1a725ddccc7144ff6bf5f55d37f916 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Wed, 30 Sep 2020 18:58:50 -0700
Subject: ARC: mm: support 3 levels of page tables

ARCv2 MMU is software walked and Linux implements 2 levels of paging:
pgd/pte. Forthcoming hw will have multiple levels, so this change preps
the mm code for the same. It is also fun to try multiple levels even on
soft-walked code, to ensure the generic mm code is robust enough to
handle it.

overview
________

2 levels {pgd, pte}: pmd is folded, but pmd_* macros are valid and
operate on the pgd

3 levels {pgd, pmd, pte}:
 - pud is folded and pud_* macros point to the pgd
 - pmd_* macros operate on an actual pmd

code changes
____________

1.  #include <asm-generic/pgtable-nopud.h>

2.  Define CONFIG_PGTABLE_LEVELS 3

3a. Define PMD_SHIFT, PMD_SIZE, PMD_MASK, pmd_t
3b. Define pmd_val(), which actually deals with the pmd
    (pmd_offset() and pmd_index() are provided by generic code)
3c. pmd_alloc_one()/pmd_free() are also provided by generic code
    (pmd_populate()/pmd_free() already exist)

4.  Define pud_none() and pud_bad() macros based on the generic
    pud_val(), which now internally pertains to the pgd.
4b. Define pud_populate() to just set up the pgd.

Acked-by: Mike Rapoport
Signed-off-by: Vineet Gupta
---
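Note: the pgtable header changes listed under "code changes" live outside
the arch/arc/mm diff shown below. As a rough stand-alone illustration of
the 3-level split performed by the new tlbex.S walk, where the PMD index
is just (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1), here is a user-space
sketch; every shift value in it is a placeholder assumption, not ARC's
real configuration:

/*
 * Illustrative sketch only: how a 3-level software walk splits a
 * virtual address into pgd/pmd/pte indices plus a page offset.
 * All shift values below are assumed for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	13	/* assumed 8K pages */
#define PMD_SHIFT	21	/* assumed: one pmd entry maps 2M */
#define PGDIR_SHIFT	30	/* assumed: one pgd entry maps 1G */

#define PTRS_PER_PTE	(1u << (PMD_SHIFT - PAGE_SHIFT))
#define PTRS_PER_PMD	(1u << (PGDIR_SHIFT - PMD_SHIFT))
#define PTRS_PER_PGD	(1u << (32 - PGDIR_SHIFT))

int main(void)
{
	uint32_t vaddr = 0x7f1234a8;	/* arbitrary example address */

	uint32_t pgd_idx = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	/* pmd_idx mirrors the lsr/and pair added to tlbex.S below */
	uint32_t pmd_idx = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	uint32_t pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	uint32_t offset  = vaddr & ((1u << PAGE_SHIFT) - 1);

	printf("pgd %u, pmd %u, pte %u, offset 0x%x\n",
	       pgd_idx, pmd_idx, pte_idx, offset);
	return 0;
}

With only 2 levels the pmd step disappears: the pmd is folded, so the
pgd entry is used directly, exactly as the overview above describes.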
 arch/arc/mm/fault.c | 4 ++++
 arch/arc/mm/init.c  | 1 +
 arch/arc/mm/tlb.c   | 4 ++--
 arch/arc/mm/tlbex.S | 9 +++++++++
 4 files changed, 16 insertions(+), 2 deletions(-)

(limited to 'arch/arc/mm')

diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 41f154320964..8da2f0ad8c69 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -39,6 +39,8 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
 	if (!pgd_present(*pgd_k))
 		goto bad_area;
 
+	set_pgd(pgd, *pgd_k);
+
 	p4d = p4d_offset(pgd, address);
 	p4d_k = p4d_offset(pgd_k, address);
 	if (!p4d_present(*p4d_k))
@@ -49,6 +51,8 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
 	if (!pud_present(*pud_k))
 		goto bad_area;
 
+	set_pud(pud, *pud_k);
+
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 46ad9aee7a73..f7ba2a5d5ec8 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -191,6 +191,7 @@ void __init mem_init(void)
 	highmem_init();
 
 	BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+	BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
 	BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
 }
 
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index dfc0c1aba48f..5f71445f26bd 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -621,8 +621,8 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 		      IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
 
 	n += scnprintf(buf + n, len - n,
-		      "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
-		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
+		      "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
+		       p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
 		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
 		       p_mmu->u_dtlb, p_mmu->u_itlb,
 		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index c4a5f16444ce..5f57eba1089d 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -173,6 +173,15 @@ ex_saved_reg1:
 	tst	r3, r3
 	bz	do_slow_path_pf		; if no Page Table, do page fault
 
+#if CONFIG_PGTABLE_LEVELS > 2
+	lsr	r0, r2, PMD_SHIFT	; Bits for indexing into PMD
+	and	r0, r0, (PTRS_PER_PMD - 1)
+	ld.as	r1, [r3, r0]		; PMD entry
+	tst	r1, r1
+	bz	do_slow_path_pf
+	mov	r3, r1
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	and.f	0, r3, _PAGE_HW_SZ	; Is this Huge PMD (thp)
 	add2.nz	r1, r1, r0
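As with the existing pgd/pte checks, the new BUILD_BUG_ON() in init.c
only asserts that a pmd table fits in a single page. A stand-alone
compile-time equivalent, using assumed sizes (8K page, 512 four-byte
pmd entries, none of them ARC's real values), could read:

#include <assert.h>

#define PAGE_SIZE	8192	/* assumed 8K page */
#define PTRS_PER_PMD	512	/* assumed pmd entries per table */

typedef unsigned int pmd_t;	/* assumed 4-byte entry, sketch only */

/* mirrors BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE) */
static_assert(PTRS_PER_PMD * sizeof(pmd_t) <= PAGE_SIZE,
	      "pmd table must fit in one page");

int main(void)
{
	return 0;
}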