| author | Guo Ren <guoren@linux.alibaba.com> | 2020-09-07 08:20:18 +0200 |
|---|---|---|
| committer | Guo Ren <guoren@linux.alibaba.com> | 2021-01-12 02:52:40 +0100 |
| commit | 0c8a32eed1625a65798286fb73fea8710a908545 (patch) | |
| tree | 69992b6e217f5e985ebbf1f739b2af336d89138c /arch/csky/mm | |
| parent | Linux 5.11-rc3 (diff) | |
| download | linux-0c8a32eed1625a65798286fb73fea8710a908545.tar.xz, linux-0c8a32eed1625a65798286fb73fea8710a908545.zip | |
csky: Add memory layout 2.5G(user):1.5G(kernel)
There are two ways of translating a virtual address to a physical address on csky:
- Use the TLB (Translation Lookaside Buffer) and PTW (Page Table Walk)
- Use SSEG0/1 (Simple Segment Mapping)
We use TLB mapping for the 0-2G and 3G-4G virtual address areas, and
SSEG0/1 for the 2G-2.5G and 2.5G-3G translations. By disabling SSEG0 we
can use 2G-2.5G as TLB-mapped user address space, which yields the
2.5G(user):1.5G(kernel) layout.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
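
To make the resulting split concrete, here is a small self-contained sketch of the address map described in the commit message. The helper below is purely illustrative (none of these names exist in the kernel); the region boundaries follow the commit message, assuming the full 2.5G(user):1.5G(kernel) layout with SSEG0 disabled:

```c
/* Hypothetical illustration of the csky va->pa scheme above; not
 * kernel code. Region boundaries follow the commit message. */
#include <stdio.h>

static const char *csky_va_region(unsigned long va)
{
	if (va < 0x80000000UL)          /* 0G   .. 2G   */
		return "user: TLB + page table walk";
	if (va < 0xA0000000UL)          /* 2G   .. 2.5G */
		return "user: TLB-mapped once SSEG0 is disabled";
	if (va < 0xC0000000UL)          /* 2.5G .. 3G   */
		return "kernel: SSEG1 simple segment mapping";
	return "kernel: TLB-mapped";    /* 3G   .. 4G   */
}

int main(void)
{
	/* 0x90000000 (2.25G) lands in the reclaimed user window */
	printf("%s\n", csky_va_region(0x90000000UL));
	return 0;
}
```

With SSEG0 left enabled, the 2G-2.5G window would instead be a fixed segment translation, capping TLB-mapped user space at 2G.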
Diffstat (limited to 'arch/csky/mm')
| -rw-r--r-- | arch/csky/mm/fault.c | 7 |
| -rw-r--r-- | arch/csky/mm/init.c | 29 |

2 files changed, 23 insertions(+), 13 deletions(-)
```diff
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 081b178b41b1..94eac13b9c97 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -59,7 +59,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 
 	si_code = SEGV_MAPERR;
 
-#ifndef CONFIG_CPU_HAS_TLBI
 	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
@@ -84,10 +83,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
 
-		unsigned long pgd_base;
-
-		pgd_base = (unsigned long)__va(get_pgd());
-		pgd = (pgd_t *)pgd_base + offset;
+		pgd = get_pgd() + offset;
 		pgd_k = init_mm.pgd + offset;
 
 		if (!pgd_present(*pgd_k))
@@ -110,7 +106,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 			goto no_context;
 		return;
 	}
-#endif
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	/*
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index af627128314f..7742f1441a67 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -28,9 +28,12 @@
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/tlb.h>
+#include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kernel_pte_tables[(PTRS_PER_PGD - USER_PTRS_PER_PGD)*PTRS_PER_PTE] __page_aligned_bss;
+
 EXPORT_SYMBOL(invalid_pte_table);
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 						__page_aligned_bss;
@@ -130,20 +133,32 @@ void pgd_init(unsigned long *p)
 
 	for (i = 0; i < PTRS_PER_PGD; i++)
 		p[i] = __pa(invalid_pte_table);
+
+	flush_tlb_all();
+	local_icache_inv_all(NULL);
 }
 
-void __init pre_mmu_init(void)
+void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
 {
-	/*
-	 * Setup page-table and enable TLB-hardrefill
-	 */
+	int i;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+		swapper_pg_dir[i].pgd = __pa(invalid_pte_table);
+
+	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
+		swapper_pg_dir[i].pgd =
+			__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));
+
+	for (i = min_pfn; i < max_pfn; i++)
+		set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));
+
 	flush_tlb_all();
-	pgd_init((unsigned long *)swapper_pg_dir);
-	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
-	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
+	local_icache_inv_all(NULL);
 
 	/* Setup page mask to 4k */
 	write_mmu_pagemask(0);
+
+	setup_pgd(swapper_pg_dir);
 }
 
 void __init fixrange_init(unsigned long start, unsigned long end,
```
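
The new kernel_pte_tables array statically reserves, in .bss, enough last-level page tables to cover the entire kernel half of the address space; mmu_init() then points every kernel PGD entry at its slice of that array and linearly maps min_pfn..max_pfn with PAGE_KERNEL. The sizing arithmetic is shown below as a standalone sketch; the constants assume a typical csky two-level setup (4 KiB pages, PTRS_PER_PGD = PTRS_PER_PTE = 1024, so one PGD entry spans 4 MiB, and USER_PTRS_PER_PGD = 640 for the 2.5G user split) and are illustrative assumptions, not values read from the kernel headers:

```c
/* Sizing sketch for kernel_pte_tables under assumed csky constants
 * (4 KiB pages, two-level tables); not taken from the kernel headers. */
#include <stdio.h>

#define PAGE_SHIFT        12
#define PTRS_PER_PTE      1024   /* one PTE table maps 4 MiB          */
#define PTRS_PER_PGD      1024   /* 1024 entries cover the full 4 GiB */
#define USER_PTRS_PER_PGD 640    /* assumed: 2.5 GiB / 4 MiB per entry */

int main(void)
{
	unsigned long kernel_pgd_entries = PTRS_PER_PGD - USER_PTRS_PER_PGD;
	unsigned long kernel_ptes = kernel_pgd_entries * PTRS_PER_PTE;

	/* 384 PGD entries * 4 MiB each = 1.5 GiB of kernel VA ... */
	printf("kernel VA covered: %lu MiB\n",
	       kernel_pgd_entries * (PTRS_PER_PTE << PAGE_SHIFT) / (1024 * 1024));
	/* ... backed by 384 * 1024 PTEs, statically allocated in .bss */
	printf("kernel_pte_tables: %lu entries (%lu KiB at 4 bytes/PTE)\n",
	       kernel_ptes, kernel_ptes * 4 / 1024);
	return 0;
}
```

Under these assumptions that is 1.5 GiB of kernel virtual space backed by 1.5 MiB of statically allocated PTE tables, which is exactly why the array is declared as (PTRS_PER_PGD - USER_PTRS_PER_PGD)*PTRS_PER_PTE entries.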