Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/fault.c | 10
-rw-r--r--  arch/riscv/mm/init.c  | 89
2 files changed, 63 insertions(+), 36 deletions(-)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 4e9efbe46d5f..40694f0cab9e 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -102,9 +102,9 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
 static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 {
 	pgd_t *pgd, *pgd_k;
-	pud_t *pud, *pud_k;
-	p4d_t *p4d, *p4d_k;
-	pmd_t *pmd, *pmd_k;
+	pud_t *pud_k;
+	p4d_t *p4d_k;
+	pmd_t *pmd_k;
 	pte_t *pte_k;
 	int index;
 	unsigned long pfn;
@@ -132,14 +132,12 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	}
 	set_pgd(pgd, *pgd_k);
 
-	p4d = p4d_offset(pgd, addr);
 	p4d_k = p4d_offset(pgd_k, addr);
 	if (!p4d_present(*p4d_k)) {
 		no_context(regs, addr);
 		return;
 	}
 
-	pud = pud_offset(p4d, addr);
 	pud_k = pud_offset(p4d_k, addr);
 	if (!pud_present(*pud_k)) {
 		no_context(regs, addr);
@@ -150,13 +148,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	 * Since the vmalloc area is global, it is unnecessary
 	 * to copy individual PTEs
 	 */
-	pmd = pmd_offset(pud, addr);
 	pmd_k = pmd_offset(pud_k, addr);
 	if (!pmd_present(*pmd_k)) {
 		no_context(regs, addr);
 		return;
 	}
-	set_pmd(pmd, *pmd_k);
 
 	/*
 	 * Make sure the actual PTE exists as well to
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 05ed641a1134..d466ec670e1f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -76,38 +76,74 @@ static void __init zone_sizes_init(void)
 }
 
 #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
+
+#define LOG2_SZ_1K ilog2(SZ_1K)
+#define LOG2_SZ_1M ilog2(SZ_1M)
+#define LOG2_SZ_1G ilog2(SZ_1G)
+#define LOG2_SZ_1T ilog2(SZ_1T)
+
 static inline void print_mlk(char *name, unsigned long b, unsigned long t)
 {
 	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
-		  (((t) - (b)) >> 10));
+		  (((t) - (b)) >> LOG2_SZ_1K));
 }
 
 static inline void print_mlm(char *name, unsigned long b, unsigned long t)
 {
 	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
-		  (((t) - (b)) >> 20));
+		  (((t) - (b)) >> LOG2_SZ_1M));
+}
+
+static inline void print_mlg(char *name, unsigned long b, unsigned long t)
+{
+	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
+		  (((t) - (b)) >> LOG2_SZ_1G));
+}
+
+#ifdef CONFIG_64BIT
+static inline void print_mlt(char *name, unsigned long b, unsigned long t)
+{
+	pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
+		  (((t) - (b)) >> LOG2_SZ_1T));
+}
+#else
+#define print_mlt(n, b, t) do {} while (0)
+#endif
+
+static inline void print_ml(char *name, unsigned long b, unsigned long t)
+{
+	unsigned long diff = t - b;
+
+	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
+		print_mlt(name, b, t);
+	else if ((diff >> LOG2_SZ_1G) >= 10)
+		print_mlg(name, b, t);
+	else if ((diff >> LOG2_SZ_1M) >= 10)
+		print_mlm(name, b, t);
+	else
+		print_mlk(name, b, t);
 }
 
 static void __init print_vm_layout(void)
 {
 	pr_notice("Virtual kernel memory layout:\n");
-	print_mlk("fixmap", (unsigned long)FIXADDR_START,
-		  (unsigned long)FIXADDR_TOP);
-	print_mlm("pci io", (unsigned long)PCI_IO_START,
-		  (unsigned long)PCI_IO_END);
-	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
-		  (unsigned long)VMEMMAP_END);
-	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
-		  (unsigned long)VMALLOC_END);
-	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
-		  (unsigned long)high_memory);
+	print_ml("fixmap", (unsigned long)FIXADDR_START,
+		 (unsigned long)FIXADDR_TOP);
+	print_ml("pci io", (unsigned long)PCI_IO_START,
+		 (unsigned long)PCI_IO_END);
+	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
+		 (unsigned long)VMEMMAP_END);
+	print_ml("vmalloc", (unsigned long)VMALLOC_START,
+		 (unsigned long)VMALLOC_END);
+	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
+		 (unsigned long)high_memory);
 	if (IS_ENABLED(CONFIG_64BIT)) {
 #ifdef CONFIG_KASAN
-		print_mlm("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
+		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
 #endif
 
-		print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
-			  (unsigned long)ADDRESS_SPACE_END);
+		print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR,
+			 (unsigned long)ADDRESS_SPACE_END);
 	}
 }
 #else
@@ -120,13 +156,7 @@ void __init mem_init(void)
 	BUG_ON(!mem_map);
 #endif /* CONFIG_FLATMEM */
 
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > PFN_DOWN(dma32_phys_limit))
-		swiotlb_init(1);
-	else
-		swiotlb_force = SWIOTLB_NO_FORCE;
-#endif
+	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
 	memblock_free_all();
 
 	print_vm_layout();
@@ -584,9 +614,9 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
 #define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
-#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
+#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 void __init create_pgd_mapping(pgd_t *pgdp,
@@ -677,7 +707,7 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 }
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 static void __init disable_pgtable_l5(void)
 {
 	pgtable_l5_enabled = false;
@@ -849,7 +879,7 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
  * MMU is not enabled, the page tables are allocated directly using
  * early_pmd/pud/p4d and the address returned is the physical one.
  */
-void __init pt_ops_set_early(void)
+static void __init pt_ops_set_early(void)
 {
 	pt_ops.alloc_pte = alloc_pte_early;
 	pt_ops.get_pte_virt = get_pte_virt_early;
@@ -871,7 +901,7 @@ void __init pt_ops_set_early(void)
  * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
  * but it will be used as described above.
  */
-void __init pt_ops_set_fixmap(void)
+static void __init pt_ops_set_fixmap(void)
 {
 	pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
 	pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
@@ -889,7 +919,7 @@ void __init pt_ops_set_fixmap(void)
  * MMU is enabled and page table setup is complete, so from now, we can use
  * generic page allocation functions to setup page table.
  */
-void __init pt_ops_set_late(void)
+static void __init pt_ops_set_late(void)
 {
 	pt_ops.alloc_pte = alloc_pte_late;
 	pt_ops.get_pte_virt = get_pte_virt_late;
@@ -953,6 +983,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
 #endif
 
+	apply_early_boot_alternatives();
 	pt_ops_set_early();
 
 	/* Setup early PGD for fixmap */