Diffstat (limited to 'arch/nds32/kernel')
-rw-r--r--  arch/nds32/kernel/Makefile       |   1
-rw-r--r--  arch/nds32/kernel/dma.c          | 325
-rw-r--r--  arch/nds32/kernel/fpu.c          |  17
-rw-r--r--  arch/nds32/kernel/signal.c       |   2
-rw-r--r--  arch/nds32/kernel/sys_nds32.c    |  26
-rw-r--r--  arch/nds32/kernel/traps.c        |  17
-rw-r--r--  arch/nds32/kernel/vdso/Makefile  |   1
7 files changed, 44 insertions, 345 deletions
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
index a1a1d61509e5..394df3f6442c 100644
--- a/arch/nds32/kernel/Makefile
+++ b/arch/nds32/kernel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for the linux kernel.
 #
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index d0dbd4fe9645..490e3720d694 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -3,327 +3,13 @@
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/string.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/highmem.h>
-#include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/proc-fns.h>
 
-/*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
- */
-static pte_t *consistent_pte;
-static DEFINE_RAW_SPINLOCK(consistent_lock);
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.) I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arch_vm_region {
-	struct list_head vm_list;
-	unsigned long vm_start;
-	unsigned long vm_end;
-	struct page *vm_pages;
-};
-
-static struct arch_vm_region consistent_head = {
-	.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start = CONSISTENT_BASE,
-	.vm_end = CONSISTENT_END,
-};
-
-static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head,
-					      size_t size, int gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct arch_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct arch_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	raw_spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
-found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
-nospc:
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
-out:
-	return NULL;
-}
-
-static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
-					     unsigned long addr)
-{
-	struct arch_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
-out:
-	return c;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		     gfp_t gfp, unsigned long attrs)
-{
-	struct page *page;
-	struct arch_vm_region *c;
-	unsigned long order;
-	u64 mask = ~0ULL, limit;
-	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
-
-	if (!consistent_pte) {
-		pr_err("%s: not initialized\n", __func__);
-		dump_stack();
-		return NULL;
-	}
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-	}
-
-	/*
-	 * Sanity check the allocation size.
-	 */
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		pr_warn("coherent allocation too big "
-			"(requested %#x mask %#llx)\n", size, mask);
-		goto no_page;
-	}
-
-	order = get_order(size);
-
-	if (mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		cpu_dma_wbinval_range(kaddr, kaddr + size);
-	}
-
-	/*
-	 * Allocate a virtual address in the consistent mapping region.
-	 */
-	c = vm_region_alloc(&consistent_head, size,
-			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
-	if (c) {
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
-		struct page *end = page + (1 << order);
-
-		c->vm_pages = page;
-
-		/*
-		 * Set the "dma handle"
-		 */
-		*handle = page_to_phys(page);
-
-		do {
-			BUG_ON(!pte_none(*pte));
-
-			/*
-			 * x86 does not mark the pages reserved...
-			 */
-			SetPageReserved(page);
-			set_pte(pte, mk_pte(page, prot));
-			page++;
-			pte++;
-		} while (size -= PAGE_SIZE);
-
-		/*
-		 * Free the otherwise unused pages.
-		 */
-		while (page < end) {
-			__free_page(page);
-			page++;
-		}
-
-		return (void *)c->vm_start;
-	}
-
-	if (page)
-		__free_pages(page, order);
-no_page:
-	*handle = ~0;
-	return NULL;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		   dma_addr_t handle, unsigned long attrs)
-{
-	struct arch_vm_region *c;
-	unsigned long flags, addr;
-	pte_t *ptep;
-
-	size = PAGE_ALIGN(size);
-
-	raw_spin_lock_irqsave(&consistent_lock, flags);
-
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-
-				__free_page(page);
-				continue;
-			}
-		}
-
-		pr_crit("%s: bad page in kernel page table\n", __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	list_del(&c->vm_list);
-
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
-	return;
-
-no_area:
-	raw_spin_unlock_irqrestore(&consistent_lock, flags);
-	pr_err("%s: trying to free invalid coherent area: %p\n",
-	       __func__, cpu_addr);
-	dump_stack();
-}
-
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-
-	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
-		if (!pmd) {
-			pr_err("%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-		/* The first level mapping may be created in somewhere.
-		 * It's not necessary to warn here. */
-		/* WARN_ON(!pmd_none(*pmd)); */
-
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-		if (!pte) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte = pte;
-	} while (0);
-
-	return ret;
-}
-
-core_initcall(consistent_init);
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
 		void (*fn)(unsigned long start, unsigned long end))
 {
@@ -389,3 +75,14 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 		BUG();
 	}
 }
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
+}
+
+static int __init atomic_pool_init(void)
+{
+	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
+}
+postcore_initcall(atomic_pool_init);
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index fddd40c7a16f..62bdafbc53f4 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -14,7 +14,7 @@ const struct fpu_struct init_fpuregs = {
 	.fd_regs = {[0 ... 31] = sNAN64},
 	.fpcsr = FPCSR_INIT,
 #if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
-	.UDF_trap = 0
+	.UDF_IEX_trap = 0
 #endif
 };
 
@@ -178,7 +178,7 @@ inline void do_fpu_context_switch(struct pt_regs *regs)
 		/* First time FPU user. */
 		load_fpu(&init_fpuregs);
 #if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
-		current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap;
+		current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
 #endif
 		set_used_math();
 	}
@@ -206,7 +206,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
 	unsigned int fpcsr;
 	int si_code = 0, si_signo = SIGFPE;
 #if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
-	unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT;
+	unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT|FPCSR_mskIEXT;
 #else
 	unsigned long redo_except = FPCSR_mskDNIT;
 #endif
@@ -215,21 +215,18 @@ inline void handle_fpu_exception(struct pt_regs *regs)
 	fpcsr = current->thread.fpu.fpcsr;
 
 	if (fpcsr & redo_except) {
-#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
-		if (fpcsr & FPCSR_mskUDFT)
-			current->thread.fpu.fpcsr &= ~FPCSR_mskIEX;
-#endif
 		si_signo = do_fpuemu(regs, &current->thread.fpu);
 		fpcsr = current->thread.fpu.fpcsr;
-		if (!si_signo)
+		if (!si_signo) {
+			current->thread.fpu.fpcsr &= ~(redo_except);
 			goto done;
+		}
 	} else if (fpcsr & FPCSR_mskRIT) {
 		if (!user_mode(regs))
 			do_exit(SIGILL);
 		si_signo = SIGILL;
 	}
 
-
 	switch (si_signo) {
 	case SIGFPE:
 		fill_sigfpe_signo(fpcsr, &si_code);
@@ -246,7 +243,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
 	}
 
 	force_sig_fault(si_signo, si_code,
-			(void __user *)instruction_pointer(regs), current);
+			(void __user *)instruction_pointer(regs));
 done:
 	own_fpu();
 }
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index 5f7660aa2d68..fe61513982b4 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -163,7 +163,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
 	return regs->uregs[0];
 
 badframe:
-	force_sig(SIGSEGV, current);
+	force_sig(SIGSEGV);
 	return 0;
 }
 
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
index 0835277636ce..cb2d1e219bb3 100644
--- a/arch/nds32/kernel/sys_nds32.c
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -6,8 +6,8 @@
 
 #include <asm/cachectl.h>
 #include <asm/proc-fns.h>
-#include <asm/udftrap.h>
 #include <asm/fpu.h>
+#include <asm/fp_udfiex_crtl.h>
 
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 	       unsigned long, prot, unsigned long, flags,
@@ -51,31 +51,33 @@ SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
 	return 0;
 }
 
-SYSCALL_DEFINE1(udftrap, int, option)
+SYSCALL_DEFINE2(fp_udfiex_crtl, unsigned int, cmd, unsigned int, act)
 {
 #if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
-	int old_udftrap;
+	int old_udf_iex;
 
 	if (!used_math()) {
 		load_fpu(&init_fpuregs);
-		current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap;
+		current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
 		set_used_math();
 	}
 
-	old_udftrap = current->thread.fpu.UDF_trap;
-	switch (option) {
-	case DISABLE_UDFTRAP:
-		current->thread.fpu.UDF_trap = 0;
+	old_udf_iex = current->thread.fpu.UDF_IEX_trap;
+	act &= (FPCSR_mskUDFE | FPCSR_mskIEXE);
+
+	switch (cmd) {
+	case DISABLE_UDF_IEX_TRAP:
+		current->thread.fpu.UDF_IEX_trap &= ~act;
 		break;
-	case ENABLE_UDFTRAP:
-		current->thread.fpu.UDF_trap = FPCSR_mskUDFE;
+	case ENABLE_UDF_IEX_TRAP:
+		current->thread.fpu.UDF_IEX_trap |= act;
 		break;
-	case GET_UDFTRAP:
+	case GET_UDF_IEX_TRAP:
 		break;
 	default:
 		return -EINVAL;
 	}
-	return old_udftrap;
+
+	return old_udf_iex;
 #else
 	return -ENOTSUPP;
 #endif
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index 5aa7c17da27a..f4d386b52622 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -205,7 +205,7 @@ int bad_syscall(int n, struct pt_regs *regs)
 	}
 
 	force_sig_fault(SIGILL, ILL_ILLTRP,
-			(void __user *)instruction_pointer(regs) - 4, current);
+			(void __user *)instruction_pointer(regs) - 4);
 	die_if_kernel("Oops - bad syscall", regs, n);
 	return regs->uregs[0];
 }
@@ -255,14 +255,15 @@ void __init early_trap_init(void)
 	cpu_cache_wbinval_page(base, true);
 }
 
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-		  int error_code, int si_code)
+static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
 {
+	struct task_struct *tsk = current;
+
 	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
 	tsk->thread.error_code = error_code;
 
 	force_sig_fault(SIGTRAP, si_code,
-			(void __user *)instruction_pointer(regs), tsk);
+			(void __user *)instruction_pointer(regs));
 }
 
 void do_debug_trap(unsigned long entry, unsigned long addr,
@@ -274,7 +275,7 @@ void do_debug_trap(unsigned long entry, unsigned long addr,
 
 	if (user_mode(regs)) {
 		/* trap_signal */
-		send_sigtrap(current, regs, 0, TRAP_BRKPT);
+		send_sigtrap(regs, 0, TRAP_BRKPT);
 	} else {
 		/* kernel_trap */
 		if (!fixup_exception(regs))
@@ -288,7 +289,7 @@ void unhandled_interruption(struct pt_regs *regs)
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGKILL);
-	force_sig(SIGKILL, current);
+	force_sig(SIGKILL);
 }
 
 void unhandled_exceptions(unsigned long entry, unsigned long addr,
@@ -299,7 +300,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGKILL);
-	force_sig(SIGKILL, current);
+	force_sig(SIGKILL);
 }
 
 extern int do_page_fault(unsigned long entry, unsigned long addr,
@@ -326,7 +327,7 @@ void do_revinsn(struct pt_regs *regs)
 	show_regs(regs);
 	if (!user_mode(regs))
 		do_exit(SIGILL);
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 }
 
 #ifdef CONFIG_ALIGNMENT_TRAP
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
index 8792fda19a64..7c3c1ccb196e 100644
--- a/arch/nds32/kernel/vdso/Makefile
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Building a vDSO image for AArch64.
 #