Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--   arch/riscv/mm/extable.c   | 66
-rw-r--r--   arch/riscv/mm/fault.c     |  6
-rw-r--r--   arch/riscv/mm/init.c      | 22
3 files changed, 71 insertions, 23 deletions
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index ddb7d3b99e89..05978f78579f 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -7,27 +7,65 @@
  */
 
 
+#include <linux/bitfield.h>
 #include <linux/extable.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <asm/asm-extable.h>
+#include <asm/ptrace.h>
 
-#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
-int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs);
-#endif
+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+	return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static bool ex_handler_fixup(const struct exception_table_entry *ex,
+			     struct pt_regs *regs)
+{
+	regs->epc = get_ex_fixup(ex);
+	return true;
+}
+
+static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
+				unsigned long val)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return;
+
+	if (offset)
+		*(unsigned long *)((unsigned long)regs + offset) = val;
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+					struct pt_regs *regs)
+{
+	int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+	regs_set_gpr(regs, reg_err, -EFAULT);
+	regs_set_gpr(regs, reg_zero, 0);
+
+	regs->epc = get_ex_fixup(ex);
+	return true;
+}
 
-int fixup_exception(struct pt_regs *regs)
+bool fixup_exception(struct pt_regs *regs)
 {
-	const struct exception_table_entry *fixup;
+	const struct exception_table_entry *ex;
 
-	fixup = search_exception_tables(regs->epc);
-	if (!fixup)
-		return 0;
+	ex = search_exception_tables(regs->epc);
+	if (!ex)
+		return false;
 
-#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
-	if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END)
-		return rv_bpf_fixup_exception(fixup, regs);
-#endif
+	switch (ex->type) {
+	case EX_TYPE_FIXUP:
+		return ex_handler_fixup(ex, regs);
+	case EX_TYPE_BPF:
+		return ex_handler_bpf(ex, regs);
+	case EX_TYPE_UACCESS_ERR_ZERO:
+		return ex_handler_uaccess_err_zero(ex, regs);
+	}
 
-	regs->epc = fixup->fixup;
-	return 1;
+	BUG();
 }
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index aa08dd2f8fae..4e9efbe46d5f 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -31,7 +31,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
 
 	bust_spinlocks(0);
 	die(regs, "Oops");
-	do_exit(SIGKILL);
+	make_task_dead(SIGKILL);
 }
 
 static inline void no_context(struct pt_regs *regs, unsigned long addr)
@@ -235,7 +235,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
+	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
 		vmalloc_fault(regs, code, addr);
 		return;
 	}
@@ -330,7 +330,7 @@ good_area:
 	if (fault_signal_pending(fault, regs))
 		return;
 
-	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 		flags |= FAULT_FLAG_TRIED;
 
 		/*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 24b2b8044602..0624c68331d8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -187,10 +187,10 @@ static void __init setup_bootmem(void)
 
 	phys_ram_end = memblock_end_of_DRAM();
-#ifndef CONFIG_64BIT
 #ifndef CONFIG_XIP_KERNEL
 	phys_ram_base = memblock_start_of_DRAM();
 #endif
+#ifndef CONFIG_64BIT
 	/*
 	 * memblock allocator is not aware of the fact that last 4K bytes of
 	 * the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -367,7 +367,8 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 	unsigned long vaddr;
 
 	vaddr = __get_free_page(GFP_KERNEL);
-	BUG_ON(!vaddr);
+	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+
 	return __pa(vaddr);
 }
 
@@ -812,13 +813,22 @@ static void __init reserve_crashkernel(void)
 	/*
 	 * Current riscv boot protocol requires 2MB alignment for
 	 * RV64 and 4MB alignment for RV32 (hugepage size)
+	 *
+	 * Try to alloc from 32bit addressible physical memory so that
+	 * swiotlb can work on the crash kernel.
 	 */
 	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
-					       search_start, search_end);
+					       search_start,
+					       min(search_end, (unsigned long) SZ_4G));
 	if (crash_base == 0) {
-		pr_warn("crashkernel: couldn't allocate %lldKB\n",
-			crash_size >> 10);
-		return;
+		/* Try again without restricting region to 32bit addressible memory */
+		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+						       search_start, search_end);
+		if (crash_base == 0) {
+			pr_warn("crashkernel: couldn't allocate %lldKB\n",
+				crash_size >> 10);
+			return;
+		}
 	}
 
 	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
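For readers following the extable.c rework above: the new handlers rely on two conventions, a fixup target stored as a signed offset relative to the entry's own fixup field (resolved by get_ex_fixup()), and, for EX_TYPE_UACCESS_ERR_ZERO, two register indices packed into the entry's data word and unpacked with FIELD_GET(). The sketch below is a minimal userspace illustration of those two ideas only; the struct layout, field widths, masks and fake_* names are assumptions made for the example, not the kernel's actual definitions (those live in the asm/asm-extable.h header the diff starts including).

/*
 * Userspace sketch of the relative-fixup + packed-register-field scheme
 * used by the reworked fixup_exception() dispatch.  Entry layout and
 * field widths here are illustrative assumptions, not the kernel ABI.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_extable_entry {
	int32_t insn;	/* offset from &insn to the faulting instruction */
	int32_t fixup;	/* offset from &fixup to the fixup/continuation code */
	int16_t type;	/* e.g. fixup / bpf / uaccess-err-zero */
	int16_t data;	/* packed register indices for the uaccess handler */
};

/* Assumed packing: bits [4:0] = error register, bits [9:5] = zero register. */
#define REG_ERR_MASK	0x001f
#define REG_ZERO_SHIFT	5
#define REG_ZERO_MASK	0x03e0

/* Mirrors get_ex_fixup(): the stored offset is relative to the field itself. */
static uintptr_t fake_get_ex_fixup(const struct fake_extable_entry *ex)
{
	return (uintptr_t)&ex->fixup + ex->fixup;
}

int main(void)
{
	struct fake_extable_entry ex = {
		.fixup = 0x40,	/* fixup code 64 bytes after &ex.fixup (made up) */
		.data  = (10 << REG_ZERO_SHIFT) | 11,	/* zero reg 10, err reg 11 */
	};

	/* Same unpacking the uaccess handler does with FIELD_GET() on ex->data. */
	unsigned int reg_err  = ex.data & REG_ERR_MASK;
	unsigned int reg_zero = (ex.data & REG_ZERO_MASK) >> REG_ZERO_SHIFT;

	printf("fixup target: %#lx (entry at %p)\n",
	       (unsigned long)fake_get_ex_fixup(&ex), (void *)&ex);
	printf("error reg index: %u, zero reg index: %u\n", reg_err, reg_zero);
	return 0;
}

Storing offsets rather than absolute pointers keeps each table entry small and position-independent, which is why the real get_ex_fixup() adds the stored value to the address of the field instead of dereferencing a pointer; regs_set_gpr() then skips offset 0 so that a "write" to the zero register is silently discarded.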