author    | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-09 07:01:58 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-09 07:01:58 +0100
commit    | 987a08741d72c1f735e31bfe478dc2ac6be8fc7e (patch)
tree      | ea1f17fd95a2123a504711209055c803126f34b6 /arch
parent    | Merge tag 'perf-tools-fixes-for-v5.12-2020-03-07' of git://git.kernel.org/pub... (diff)
parent    | Merge branch 'work.sparc32' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff)
download  | linux-987a08741d72c1f735e31bfe478dc2ac6be8fc7e.tar.xz linux-987a08741d72c1f735e31bfe478dc2ac6be8fc7e.zip
Merge git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller:
"Just some more random bits from Al, including a conversion over to
generic extables"
* git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc:
sparc32: take ->thread.flags out
sparc32: get rid of fake_swapper_regs
sparc64: get rid of fake_swapper_regs
sparc32: switch to generic extables
sparc32: switch copy_user.S away from range exception table entries
sparc32: get rid of range exception table entries in checksum_32.S
sparc32: switch __bzero() away from range exception table entries
sparc32: kill lookup_fault()
sparc32: don't bother with lookup_fault() in __bzero()
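
A note for readers of the diff below: a "generic" exception-table entry is the common two-word form used by most architectures (the address of an instruction that is allowed to fault, and the address of the fixup code to resume at), looked up with the kernel's search_exception_tables() helper instead of the sparc32-only search_extables_range(). A minimal C sketch of the pattern the converted handlers follow (condensed from kernel_mna_trap_fault() and do_sparc_fault() in this diff; illustrative only, not a drop-in kernel function):

	#include <linux/extable.h>	/* search_exception_tables() */
	#include <asm/ptrace.h>		/* struct pt_regs: pc/npc pair on sparc */

	/* Try to redirect a faulting kernel access to its fixup handler.
	 * Returns 1 if a fixup was found and applied, 0 if the caller must oops.
	 */
	static int fixup_kernel_fault(struct pt_regs *regs)
	{
		const struct exception_table_entry *entry;

		/* Generic search over the sorted __ex_table section. */
		entry = search_exception_tables(regs->pc);
		if (!entry)
			return 0;

		/* Resume at the out-of-line fixup code; sparc advances a PC pair. */
		regs->pc = entry->fixup;
		regs->npc = regs->pc + 4;
		return 1;
	}

Compare this with kernel_mna_trap_fault() in unaligned_32.c below, which applies exactly this lookup before advancing the PC pair.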
Diffstat (limited to 'arch')
-rw-r--r-- | arch/sparc/include/asm/elf_64.h | 1
-rw-r--r-- | arch/sparc/include/asm/extable.h (renamed from arch/sparc/include/asm/extable_64.h) | 4
-rw-r--r-- | arch/sparc/include/asm/processor_32.h | 6
-rw-r--r-- | arch/sparc/include/asm/thread_info_64.h | 1
-rw-r--r-- | arch/sparc/include/asm/uaccess.h | 3
-rw-r--r-- | arch/sparc/include/asm/uaccess_32.h | 38
-rw-r--r-- | arch/sparc/include/asm/uaccess_64.h | 1
-rw-r--r-- | arch/sparc/kernel/head_32.S | 2
-rw-r--r-- | arch/sparc/kernel/head_64.S | 2
-rw-r--r-- | arch/sparc/kernel/process_32.c | 12
-rw-r--r-- | arch/sparc/kernel/setup_32.c | 3
-rw-r--r-- | arch/sparc/kernel/setup_64.c | 4
-rw-r--r-- | arch/sparc/kernel/unaligned_32.c | 106
-rw-r--r-- | arch/sparc/lib/checksum_32.S | 64
-rw-r--r-- | arch/sparc/lib/copy_user.S | 315
-rw-r--r-- | arch/sparc/lib/memset.S | 87
-rw-r--r-- | arch/sparc/mm/Makefile | 2
-rw-r--r-- | arch/sparc/mm/extable.c | 107
-rw-r--r-- | arch/sparc/mm/fault_32.c | 80
-rw-r--r-- | arch/sparc/mm/mm_32.h | 2
20 files changed, 198 insertions, 642 deletions
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 7e078bc73ef5..8fb09eec8c3e 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -8,7 +8,6 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable.h
index 5a0171907b7e..554a9dc376fc 100644
--- a/arch/sparc/include/asm/extable_64.h
+++ b/arch/sparc/include/asm/extable.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 3c4bc2189092..b6242f7771e9 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -50,16 +50,12 @@ struct thread_struct {
 	unsigned long fsr;
 	unsigned long fpqdepth;
 	struct fpq fpqueue[16];
-	unsigned long flags;
 	mm_segment_t current_ds;
 };

-#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
-
 #define INIT_THREAD { \
-	.flags = SPARC_FLAG_KTHREAD, \
 	.current_ds = KERNEL_DS, \
+	.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }

 /* Do necessary setup to start up a newly executed thread. */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 42cd4cd3892e..8047a9caab2f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -118,6 +118,7 @@ struct thread_info {
 	.task		= &tsk,				\
 	.current_ds	= ASI_P,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,		\
+	.kregs		= (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }

 /* how to get the thread information struct from C */
diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
index dd85bc2c2cad..390094200fc4 100644
--- a/arch/sparc/include/asm/uaccess.h
+++ b/arch/sparc/include/asm/uaccess.h
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 0a2d3ebc4bb8..4a12346bb69c 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -13,9 +13,6 @@
 #include <asm/processor.h>

-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)

-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
 	unsigned long ret;

 	__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,3\n\t"
-		".previous\n\t"
 		"mov %2, %%o1\n"
-		"1:\n\t"
 		"call __bzero\n\t"
 		" mov %1, %%o0\n\t"
 		"mov %%o0, %0\n"
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 698cf69f74e9..30eb4c6414d1 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>

 #include <asm/processor.h>
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index be30c8d4cc73..6044b82b9767 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -515,7 +515,7 @@ continue_boot:

 		/* I want a kernel stack NOW! */
 		set	init_thread_union, %g1
-		set	(THREAD_SIZE - STACKFRAME_SZ), %g2
+		set	(THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
 		add	%g1, %g2, %sp
 		mov	0, %fp		/* And for good luck */
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index c5ff2472b3d9..72a5bdc833ea 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -706,7 +706,7 @@ tlb_fixup_done:
 	wr	%g0, ASI_P, %asi
 	mov	1, %g1
 	sllx	%g1, THREAD_SHIFT, %g1
-	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+	sub	%g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
 	add	%g6, %g1, %sp

 	/* Set per-cpu pointer initially to zero, this makes
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b91e88058e0c..3b9794978e5b 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -216,16 +216,6 @@ void flush_thread(void)
 		clear_thread_flag(TIF_USEDFPU);
 #endif
 	}
-
-	/* This task is no longer a kernel thread. */
-	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
-		current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-
-		/* We must fixup kregs as well. */
-		/* XXX This was not fixed for ti for a while, worked. Unused? */
-		current->thread.kregs = (struct pt_regs *)
-			(task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
-	}
 }

 static inline struct sparc_stackf __user *
@@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 		extern int nwindows;
 		unsigned long psr;
 		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
-		p->thread.flags |= SPARC_FLAG_KTHREAD;
 		p->thread.current_ds = KERNEL_DS;
 		ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
 		childregs->u_regs[UREG_G1] = sp; /* function */
@@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	}
 	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
 	childregs->u_regs[UREG_FP] = sp;
-	p->thread.flags &= ~SPARC_FLAG_KTHREAD;
 	p->thread.current_ds = USER_DS;
 	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
 	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index eea43a1aef1b..c8e0dd99f370 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -266,7 +266,6 @@ static __init void leon_patch(void)
 }

 struct tt_entry *sparc_ttable;
-static struct pt_regs fake_swapper_regs;

 /* Called from head_32.S - before we have setup anything
  * in the kernel. Be very careful with what you do here.
@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
 		(*(linux_dbvec->teach_debugger))();
 	}

-	init_task.thread.kregs = &fake_swapper_regs;
-
 	/* Run-time patch instructions to match the cpu model */
 	per_cpu_patch();
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index d87244197d5c..48abee4eee29 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -165,8 +165,6 @@ extern int root_mountflags;

 char reboot_command[COMMAND_LINE_SIZE];

-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
 static void __init per_cpu_patch(void)
 {
 	struct cpuid_patch_entry *p;
@@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
 		rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
 #endif

-	task_thread_info(&init_task)->kregs = &fake_swapper_regs;
-
 #ifdef CONFIG_IP_PNP
 	if (!ic_set_manually) {
 		phandle chosen = prom_finddevice("/chosen");
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 83db94c0b431..ef5c5207c9ff 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>

 #include <asm/setup.h>

@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)

 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-	unsigned long g2 = regs->u_regs [UREG_G2];
-	unsigned long fixup = search_extables_range(regs->pc, &g2);
+	const struct exception_table_entry *entry;

-	if (!fixup) {
+	entry = search_exception_tables(regs->pc);
+	if (!entry) {
 		unsigned long address = compute_effective_address(regs, insn);
 		if(address < PAGE_SIZE) {
 			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 		die_if_kernel("Oops", regs);
 		/* Not reached */
 	}
-	regs->pc = fixup;
+	regs->pc = entry->fixup;
 	regs->npc = regs->pc + 4;
-	regs->u_regs [UREG_G2] = g2;
 }

 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
@@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	}
 }

-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
-			      enum direction dir)
-{
-	unsigned int reg;
-	int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-
-	if ((regs->pc | regs->npc) & 3)
-		return 0;
-
-	/* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
-	((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-
-	reg = (insn >> 25) & 0x1f;
-	if (reg >= 16) {
-		if (!access_ok(WINREG_ADDR(reg - 16), size))
-			return -EFAULT;
-	}
-	reg = (insn >> 14) & 0x1f;
-	if (reg >= 16) {
-		if (!access_ok(WINREG_ADDR(reg - 16), size))
-			return -EFAULT;
-	}
-	if (!(insn & 0x2000)) {
-		reg = (insn & 0x1f);
-		if (reg >= 16) {
-			if (!access_ok(WINREG_ADDR(reg - 16), size))
-				return -EFAULT;
-		}
-	}
-#undef WINREG_ADDR
-	return 0;
-}
-
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	send_sig_fault(SIGBUS, BUS_ADRALN,
 		       (void __user *)safe_compute_effective_address(regs, insn),
 		       0, current);
 }
-
-asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
-{
-	enum direction dir;
-
-	if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
-	   (((insn >> 30) & 3) != 3))
-		goto kill_user;
-	dir = decode_direction(insn);
-	if(!ok_for_user(regs, insn, dir)) {
-		goto kill_user;
-	} else {
-		int err, size = decode_access_size(insn);
-		unsigned long addr;
-
-		if(floating_point_load_or_store_p(insn)) {
-			printk("User FPU load/store unaligned unsupported.\n");
-			goto kill_user;
-		}
-
-		addr = compute_effective_address(regs, insn);
-		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-		switch(dir) {
-		case load:
-			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
-							 regs),
-					  size, (unsigned long *) addr,
-					  decode_signedness(insn));
-			break;
-
-		case store:
-			err = do_int_store(((insn>>25)&0x1f), size,
-					   (unsigned long *) addr, regs);
-			break;
-
-		case both:
-			/*
-			 * This was supported in 2.4. However, we question
-			 * the value of SWAP instruction across word boundaries.
-			 */
-			printk("Unaligned SWAP unsupported.\n");
-			err = -EFAULT;
-			break;
-
-		default:
-			unaligned_panic("Impossible user unaligned trap.");
-			goto out;
-		}
-		if (err)
-			goto kill_user;
-		else
-			advance(regs);
-		goto out;
-	}
-
-kill_user:
-	user_mna_trap_fault(regs, insn);
-out:
-	;
-}
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 7488d130faf7..781e39b3c009 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -155,13 +155,6 @@ cpout:	retl				! get outta here
 	.text;				\
 	.align	4

-#define EXT(start,end)			\
-	.section __ex_table,ALLOC;	\
-	.align	4;			\
-	.word	start, 0, end, cc_fault; \
-	.text;				\
-	.align	4
-
 /* This aligned version executes typically in 8.5 superscalar cycles, this
  * is the best I can do.  I say 8.5 because the final add will pair with
  * the next ldd in the main unrolled loop.  Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout:	retl				! get outta here
  * please check the fixup code below as well.
  */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[src + off + 0x00], t0; \
-	ldd	[src + off + 0x08], t2; \
+	EX(ldd	[src + off + 0x00], t0); \
+	EX(ldd	[src + off + 0x08], t2); \
 	addxcc	t0, sum, sum; \
-	ldd	[src + off + 0x10], t4; \
+	EX(ldd	[src + off + 0x10], t4); \
 	addxcc	t1, sum, sum; \
-	ldd	[src + off + 0x18], t6; \
+	EX(ldd	[src + off + 0x18], t6); \
 	addxcc	t2, sum, sum; \
-	std	t0, [dst + off + 0x00]; \
+	EX(std	t0, [dst + off + 0x00]); \
 	addxcc	t3, sum, sum; \
-	std	t2, [dst + off + 0x08]; \
+	EX(std	t2, [dst + off + 0x08]); \
 	addxcc	t4, sum, sum; \
-	std	t4, [dst + off + 0x10]; \
+	EX(std	t4, [dst + off + 0x10]); \
 	addxcc	t5, sum, sum; \
-	std	t6, [dst + off + 0x18]; \
+	EX(std	t6, [dst + off + 0x18]); \
 	addxcc	t6, sum, sum; \
 	addxcc	t7, sum, sum;

@@ -191,39 +184,39 @@ cpout:	retl				! get outta here
  * Viking MXCC into streaming mode. Ho hum...
  */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[src + off + 0x00], t0; \
-	ldd	[src + off + 0x08], t2; \
-	ldd	[src + off + 0x10], t4; \
-	ldd	[src + off + 0x18], t6; \
-	st	t0, [dst + off + 0x00]; \
+	EX(ldd	[src + off + 0x00], t0); \
+	EX(ldd	[src + off + 0x08], t2); \
+	EX(ldd	[src + off + 0x10], t4); \
+	EX(ldd	[src + off + 0x18], t6); \
+	EX(st	t0, [dst + off + 0x00]); \
 	addxcc	t0, sum, sum; \
-	st	t1, [dst + off + 0x04]; \
+	EX(st	t1, [dst + off + 0x04]); \
 	addxcc	t1, sum, sum; \
-	st	t2, [dst + off + 0x08]; \
+	EX(st	t2, [dst + off + 0x08]); \
 	addxcc	t2, sum, sum; \
-	st	t3, [dst + off + 0x0c]; \
+	EX(st	t3, [dst + off + 0x0c]); \
 	addxcc	t3, sum, sum; \
-	st	t4, [dst + off + 0x10]; \
+	EX(st	t4, [dst + off + 0x10]); \
 	addxcc	t4, sum, sum; \
-	st	t5, [dst + off + 0x14]; \
+	EX(st	t5, [dst + off + 0x14]); \
 	addxcc	t5, sum, sum; \
-	st	t6, [dst + off + 0x18]; \
+	EX(st	t6, [dst + off + 0x18]); \
 	addxcc	t6, sum, sum; \
-	st	t7, [dst + off + 0x1c]; \
+	EX(st	t7, [dst + off + 0x1c]); \
 	addxcc	t7, sum, sum;

 /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
-	ldd	[src - off - 0x08], t0; \
-	ldd	[src - off - 0x00], t2; \
+	EX(ldd	[src - off - 0x08], t0); \
+	EX(ldd	[src - off - 0x00], t2); \
 	addxcc	t0, sum, sum; \
-	st	t0, [dst - off - 0x08]; \
+	EX(st	t0, [dst - off - 0x08]); \
 	addxcc	t1, sum, sum; \
-	st	t1, [dst - off - 0x04]; \
+	EX(st	t1, [dst - off - 0x04]); \
 	addxcc	t2, sum, sum; \
-	st	t2, [dst - off - 0x00]; \
+	EX(st	t2, [dst - off - 0x00]); \
 	addxcc	t3, sum, sum; \
-	st	t3, [dst - off + 0x04];
+	EX(st	t3, [dst - off + 0x04]);

 /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:	EXT(5b, 10b)			! note for exception handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
@@ -356,8 +348,7 @@ cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:	EXT(cctbl, 12b)			! note for exception table handling
-	addx	%g0, %g7, %g7
+12:	addx	%g0, %g7, %g7
 	andcc	%o3, 0xf, %g0		! check for low bits set
 ccte:	bne	cc_end_cruft		! something left, handle it out of band
 	 andcc	%o3, 8, %g0		! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:	EXT(ccdbl, 11b)			! note for exception table handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index dc72f2b970b7..954572c78539 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -21,98 +21,134 @@
 /* Work around cpp -rob */
 #define ALLOC #alloc
 #define EXECINSTR #execinstr
+
+#define EX_ENTRY(l1, l2)			\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	l1, l2;				\
+	.text;
+
 #define EX(x,y,a,b)				\
 98:	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
-99:	ba fixupretl;				\
-	 a, b, %g3;				\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 99b;			\
-	.text;					\
-	.align	4
+99:	retl;					\
+	 a, b, %o0;				\
+	EX_ENTRY(98b, 99b)

 #define EX2(x,y,c,d,e,a,b)			\
 98:	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
 99:	c, d, e;				\
-	ba fixupretl;				\
-	 a, b, %g3;				\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 99b;			\
-	.text;					\
-	.align	4
+	retl;					\
+	 a, b, %o0;				\
+	EX_ENTRY(98b, 99b)

 #define EXO2(x,y)				\
 98:	x, y;					\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 97f;			\
-	.text;					\
-	.align	4
+	EX_ENTRY(98b, 97f)

-#define EXT(start,end,handler)			\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	start, 0, end, handler;		\
-	.text;					\
-	.align	4
+#define LD(insn, src, offset, reg, label)	\
+98:	insn [%src + (offset)], %reg;		\
+	.section .fixup,ALLOC,EXECINSTR;	\
+99:	ba	label;				\
+	 mov	offset, %g5;			\
+	EX_ENTRY(98b, 99b)

-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label)	\
+98:	insn %reg, [%dst + (offset)];		\
+	.section .fixup,ALLOC,EXECINSTR;	\
+99:	ba	label;				\
+	 mov	offset, %g5;			\
+	EX_ENTRY(98b, 99b)

 /* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	st	%t0, [%dst + (offset) + 0x00]; \
-	st	%t1, [%dst + (offset) + 0x04]; \
-	st	%t2, [%dst + (offset) + 0x08]; \
-	st	%t3, [%dst + (offset) + 0x0c]; \
-	st	%t4, [%dst + (offset) + 0x10]; \
-	st	%t5, [%dst + (offset) + 0x14]; \
-	st	%t6, [%dst + (offset) + 0x18]; \
-	st	%t7, [%dst + (offset) + 0x1c];
-
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
+	ST(st, dst, offset + 0x00, t0, bigchunk_fault)	\
+	ST(st, dst, offset + 0x04, t1, bigchunk_fault)	\
+	ST(st, dst, offset + 0x08, t2, bigchunk_fault)	\
+	ST(st, dst, offset + 0x0c, t3, bigchunk_fault)	\
+	ST(st, dst, offset + 0x10, t4, bigchunk_fault)	\
+	ST(st, dst, offset + 0x14, t5, bigchunk_fault)	\
+	ST(st, dst, offset + 0x18, t6, bigchunk_fault)	\
+	ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
+
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	std	%t0, [%dst + (offset) + 0x00]; \
-	std	%t2, [%dst + (offset) + 0x08]; \
-	std	%t4, [%dst + (offset) + 0x10]; \
-	std	%t6, [%dst + (offset) + 0x18];
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
+	ST(std, dst, offset + 0x00, t0, bigchunk_fault)	\
+	ST(std, dst, offset + 0x08, t2, bigchunk_fault)	\
+	ST(std, dst, offset + 0x10, t4, bigchunk_fault)	\
+	ST(std, dst, offset + 0x18, t6, bigchunk_fault)
+
+	.section .fixup,#alloc,#execinstr
+bigchunk_fault:
+	sub	%g7, %g5, %o0
+	and	%g1, 127, %g1
+	retl
+	 add	%o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldd	[%src - (offset) - 0x10], %t0; \
-	ldd	[%src - (offset) - 0x08], %t2; \
-	st	%t0, [%dst - (offset) - 0x10]; \
-	st	%t1, [%dst - (offset) - 0x0c]; \
-	st	%t2, [%dst - (offset) - 0x08]; \
-	st	%t3, [%dst - (offset) - 0x04];
+	LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault)	\
+	LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x10), t0, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x08), t2, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)

-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	lduh	[%src + (offset) + 0x00], %t0; \
-	lduh	[%src + (offset) + 0x02], %t1; \
-	lduh	[%src + (offset) + 0x04], %t2; \
-	lduh	[%src + (offset) + 0x06], %t3; \
-	sth	%t0, [%dst + (offset) + 0x00]; \
-	sth	%t1, [%dst + (offset) + 0x02]; \
-	sth	%t2, [%dst + (offset) + 0x04]; \
-	sth	%t3, [%dst + (offset) + 0x06];
+	.section .fixup,#alloc,#execinstr
+lastchunk_fault:
+	and	%g1, 15, %g1
+	retl
+	 sub	%g1, %g5, %o0

+/* left: o3 + (o2 % 16) - offset */
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	LD(lduh, src, offset + 0x00, t0, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x02, t1, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x04, t2, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x06, t3, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x00, t0, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x02, t1, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x04, t2, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
+
+/* left: o3 + (o2 % 16) + offset + 2 */
 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
-	ldub	[%src - (offset) - 0x02], %t0; \
-	ldub	[%src - (offset) - 0x01], %t1; \
-	stb	%t0, [%dst - (offset) - 0x02]; \
-	stb	%t1, [%dst - (offset) - 0x01];
+	LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault)	\
+	LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault)	\
+	ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault)	\
+	ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
+
+	.section .fixup,#alloc,#execinstr
+halfchunk_fault:
+	and	%o2, 15, %o2
+	sub	%o3, %g5, %o3
+	retl
+	 add	%o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+	LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault)	\
+	LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault)	\
+	ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault)	\
+	ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
+
+	.section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+	and	%o2, 1, %o2
+	retl
+	 sub	%o2, %g5, %o0

 	.text
 	.align	4
@@ -182,8 +218,6 @@ __copy_user:	/* %o0=dst %o1=src %o2=len */
 	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
-	EXT(5b, 80b, 50f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	5b
@@ -201,7 +235,6 @@ __copy_user:	/* %o0=dst %o1=src %o2=len */
 	jmpl	%o5 + %lo(copy_user_table_end), %g0
 	 add	%o0, %g7, %o0

-copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
@@ -210,7 +243,6 @@ copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 copy_user_table_end:
-	EXT(copy_user_table, copy_user_table_end, 51f)
 	be	copy_user_last7
 	 andcc	%g1, 4, %g0
@@ -250,8 +282,6 @@ ldd_std:
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
-	EXT(ldd_std, 81b, 52f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	ldd_std
@@ -290,8 +320,6 @@ cannot_optimize:
 10:
 	MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 	MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
-	EXT(10b, 82b, 53f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	10b
@@ -308,8 +336,6 @@ byte_chunk:
 	MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
-	EXT(byte_chunk, 83b, 54f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	byte_chunk
@@ -325,16 +351,14 @@ short_end:
 	add	%o1, %o3, %o1
 	jmpl	%o5 + %lo(short_table_end), %g0
 	 andcc	%o2, 1, %g0
-84:
-	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
 short_table_end:
-	EXT(84b, short_table_end, 55f)
 	be	1f
 	 nop
 	EX(ldub	[%o1], %g2, add %g0, 1)
@@ -363,123 +387,8 @@ short_aligned_end:

 	.section .fixup,#alloc,#execinstr
 	.align	4
 97:
-	mov	%o2, %g3
-fixupretl:
 	retl
-	 mov	%g3, %o0
-
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
-	cmp	%g2, 12
-	add	%o0, %g7, %o0
-	bcs	1f
-	 cmp	%g2, 24
-	bcs	2f
-	 cmp	%g2, 36
-	bcs	3f
-	 nop
-	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-3:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-2:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-1:	cmp	%g2, 4
-	bcs,a	60f
-	 clr	%g2
-	sub	%g2, 4, %g2
-	sll	%g2, 2, %g2
-60:	and	%g1, 0x7f, %g3
-	sub	%o0, %g7, %o0
-	add	%g3, %g7, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
-	neg	%g2
-	and	%g1, 0xf, %g1
-	add	%g2, 41, %g2
-	add	%o0, %g1, %o0
-1:	cmp	%g2, 6
-	bcs,a	2f
-	 cmp	%g2, 4
-	add	%g1, 16, %g1
-	b	1b
-	 sub	%g2, 6, %g2
-2:	bcc,a	2f
-	 mov	16, %g2
-	inc	%g2
-	sll	%g2, 2, %g2
-2:	add	%g1, %g2, %g3
-	ba	fixupretl
-	 sub	%o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
-   o0 += (g2 / 8) * 32 */
-	andn	%g2, 7, %g4
-	add	%o0, %g7, %o0
-	andcc	%g2, 4, %g0
-	and	%g2, 3, %g2
-	sll	%g4, 2, %g4
-	sll	%g2, 3, %g2
-	bne	60b
-	 sub	%g7, %g4, %g7
-	ba	60b
-	 clr	%g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
-   o0 += (g2 & 8) */
-	and	%g2, 3, %g4
-	andcc	%g2, 4, %g0
-	and	%g2, 8, %g2
-	sll	%g4, 1, %g4
-	be	1f
-	 add	%o0, %g2, %o0
-	add	%g2, %g4, %g2
-1:	and	%o2, 0xf, %g3
-	add	%g3, %o3, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
-   o0 += (g2 / 4) * 2 */
-	srl	%g2, 2, %o4
-	and	%g2, 1, %o5
-	srl	%g2, 1, %g2
-	add	%o4, %o4, %o4
-	and	%o5, %g2, %o5
-	and	%o2, 0xf, %o2
-	add	%o0, %o4, %o0
-	sub	%o3, %o5, %o3
-	sub	%o2, %o4, %o2
-	ba	fixupretl
-	 add	%o2, %o3, %g3
-55:
-/* i = 27 - g2;
-   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
-   o0 -= i / 4 * 2 + 1 */
-	neg	%g2
-	and	%o2, 1, %o2
-	add	%g2, 27, %g2
-	srl	%g2, 2, %o5
-	andcc	%g2, 3, %g0
-	mov	1, %g2
-	add	%o5, %o5, %o5
-	be,a	1f
-	 clr	%g2
-1:	add	%g2, %o5, %g3
-	sub	%o0, %g3, %o0
-	ba	fixupretl
-	 add	%g3, %o2, %g3
+	 mov	%o2, %o0

 .globl __copy_user_end
 __copy_user_end:
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index f427f34b8b79..eaff68213fdf 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -19,7 +19,7 @@
 98:	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
-99:	ba	30f;				\
+99:	retl;					\
 	 a, b, %o0;				\
 	.section __ex_table,ALLOC;		\
 	.align	4;				\
@@ -27,35 +27,44 @@
 	.text;					\
 	.align	4

-#define EXT(start,end,handler)			\
+#define STORE(source, base, offset, n)		\
+98:	std source, [base + offset + n];	\
+	.section .fixup,ALLOC,EXECINSTR;	\
+	.align	4;				\
+99:	ba	30f;				\
+	 sub	%o3, n - offset, %o3;		\
 	.section __ex_table,ALLOC;		\
 	.align	4;				\
-	.word	start, 0, end, handler;		\
+	.word	98b, 99b;			\
 	.text;					\
-	.align	4
+	.align	4;
+
+#define STORE_LAST(source, base, offset, n)	\
+	EX(std source, [base - offset - n],	\
+	   add %o1, offset + n);

 /* Please don't change these macros, unless you change the logic
  * in the .fixup section below as well.
  * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
-#define ZERO_BIG_BLOCK(base, offset, source)	\
-	std	source, [base + offset + 0x00]; \
-	std	source, [base + offset + 0x08]; \
-	std	source, [base + offset + 0x10]; \
-	std	source, [base + offset + 0x18]; \
-	std	source, [base + offset + 0x20]; \
-	std	source, [base + offset + 0x28]; \
-	std	source, [base + offset + 0x30]; \
-	std	source, [base + offset + 0x38];
+#define ZERO_BIG_BLOCK(base, offset, source)	\
+	STORE(source, base, offset, 0x00);	\
+	STORE(source, base, offset, 0x08);	\
+	STORE(source, base, offset, 0x10);	\
+	STORE(source, base, offset, 0x18);	\
+	STORE(source, base, offset, 0x20);	\
+	STORE(source, base, offset, 0x28);	\
+	STORE(source, base, offset, 0x30);	\
+	STORE(source, base, offset, 0x38);

 #define ZERO_LAST_BLOCKS(base, offset, source)	\
-	std	source, [base - offset - 0x38]; \
-	std	source, [base - offset - 0x30]; \
-	std	source, [base - offset - 0x28]; \
-	std	source, [base - offset - 0x20]; \
-	std	source, [base - offset - 0x18]; \
-	std	source, [base - offset - 0x10]; \
-	std	source, [base - offset - 0x08]; \
-	std	source, [base - offset - 0x00];
+	STORE_LAST(source, base, offset, 0x38);	\
+	STORE_LAST(source, base, offset, 0x30);	\
+	STORE_LAST(source, base, offset, 0x28);	\
+	STORE_LAST(source, base, offset, 0x20);	\
+	STORE_LAST(source, base, offset, 0x18);	\
+	STORE_LAST(source, base, offset, 0x10);	\
+	STORE_LAST(source, base, offset, 0x08);	\
+	STORE_LAST(source, base, offset, 0x00);

 	.text
 	.align 4
@@ -68,8 +77,6 @@ __bzero_begin:
 	.globl	memset
 	EXPORT_SYMBOL(__bzero)
 	EXPORT_SYMBOL(memset)
-	.globl	__memset_start, __memset_end
-__memset_start:
 memset:
 	mov	%o0, %g1
 	mov	1, %g4
@@ -122,8 +129,6 @@ __bzero:
 	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
 	subcc	%o3, 128, %o3
 	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-	EXT(10b, 11b, 20f)
 	bne	10b
 	 add	%o0, 128, %o0

@@ -138,11 +143,9 @@ __bzero:
 	jmp	%o4
 	 add	%o0, %o2, %o0

-12:
 	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
 	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
 13:
-	EXT(12b, 13b, 21f)
 	be	8f
 	 andcc	%o1, 4, %g0

@@ -182,37 +185,13 @@ __bzero:
 5:
 	retl
 	 clr	%o0
-__memset_end:

 	.section .fixup,#alloc,#execinstr
 	.align	4
-20:
-	cmp	%g2, 8
-	bleu	1f
-	 and	%o1, 0x7f, %o1
-	sub	%g2, 9, %g2
-	add	%o3, 64, %o3
-1:
-	sll	%g2, 3, %g2
-	add	%o3, %o1, %o0
-	b	30f
-	 sub	%o0, %g2, %o0
-21:
-	mov	8, %o0
-	and	%o1, 7, %o1
-	sub	%o0, %g2, %o0
-	sll	%o0, 3, %o0
-	b	30f
-	 add	%o0, %o1, %o0
 30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-	save	%sp, -104, %sp
-	mov	%i5, %o0
-	mov	%i7, %o1
-	call	lookup_fault
-	 mov	%i4, %o2
-	ret
-	 restore
+	and	%o1, 0x7f, %o1
+	retl
+	 add	%o3, %o1, %o0

 .globl __bzero_end
 __bzero_end:
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 68db1f859b02..871354aa3c00 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32)   += leon_mm.o
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
deleted file mode 100644
index 241b40641873..000000000000
--- a/arch/sparc/mm/extable.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
-		  struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-	       const size_t num,
-	       unsigned long value)
-{
-	int i;
-
-	/* Single insn entries are encoded as:
-	 * word 1: insn address
-	 * word 2: fixup code address
-	 *
-	 * Range entries are encoded as:
-	 * word 1: first insn address
-	 * word 2: 0
-	 * word 3: last insn address + 4 bytes
-	 * word 4: fixup code address
-	 *
-	 * Deleted entries are encoded as:
-	 * word 1: unused
-	 * word 2: -1
-	 *
-	 * See asm/uaccess.h for more details.
-	 */
-
-	/* 1. Try to find an exact match. */
-	for (i = 0; i < num; i++) {
-		if (base[i].fixup == 0) {
-			/* A range entry, skip both parts. */
-			i++;
-			continue;
-		}
-
-		/* A deleted entry; see trim_init_extable */
-		if (base[i].fixup == -1)
-			continue;
-
-		if (base[i].insn == value)
-			return &base[i];
-	}
-
-	/* 2. Try to find a range match. */
-	for (i = 0; i < (num - 1); i++) {
-		if (base[i].fixup)
-			continue;
-
-		if (base[i].insn <= value && base[i + 1].insn > value)
-			return &base[i];
-
-		i++;
-	}
-
-	return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
-	unsigned int i;
-	bool range;
-
-	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
-		range = m->extable[i].fixup == 0;
-
-		if (within_module_init(m->extable[i].insn, m)) {
-			m->extable[i].fixup = -1;
-			if (range)
-				m->extable[i+1].fixup = -1;
-		}
-		if (range)
-			i++;
-	}
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges.  Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-	const struct exception_table_entry *entry;
-
-	entry = search_exception_tables(addr);
-	if (!entry)
-		return 0;
-
-	/* Inside range?  Fix g2 and return correct fixup */
-	if (!entry->fixup) {
-		*g2 = (addr - entry->insn) / 4;
-		return (entry + 1)->fixup;
-	}
-
-	return entry->fixup;
-}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 40ce087dfecf..de2031c2b2d7 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>

 #include <asm/page.h>
 #include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
 	die_if_kernel("Oops", regs);
 }

-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address)
-{
-	struct pt_regs regs;
-	unsigned long g2;
-	unsigned int insn;
-	int i;
-
-	i = search_extables_range(ret_pc, &g2);
-	switch (i) {
-	case 3:
-		/* load & store will be handled by fixup */
-		return 3;
-
-	case 1:
-		/* store will be handled by fixup, load will bump out */
-		/* for _to_ macros */
-		insn = *((unsigned int *) pc);
-		if ((insn >> 21) & 1)
-			return 1;
-		break;
-
-	case 2:
-		/* load will be handled by fixup, store will bump out */
-		/* for _from_ macros */
-		insn = *((unsigned int *) pc);
-		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
-			return 2;
-		break;
-
-	default:
-		break;
-	}
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pc = pc;
-	regs.npc = pc + 4;
-	__asm__ __volatile__(
-		"rd %%psr, %0\n\t"
-		"nop\n\t"
-		"nop\n\t"
-		"nop\n" : "=r" (regs.psr));
-	unhandled_fault(address, current, &regs);
-
-	/* Not reached */
-	return 0;
-}
-
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
 		unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	struct vm_area_struct *vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	unsigned int fixup;
-	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int code;
 	vm_fault_t fault;
@@ -281,30 +232,19 @@ bad_area_nosemaphore:

 	/* Is this in ex_table? */
 no_context:
-	g2 = regs->u_regs[UREG_G2];
 	if (!from_user) {
-		fixup = search_extables_range(regs->pc, &g2);
-		/* Values below 10 are reserved for other things */
-		if (fixup > 10) {
-			extern const unsigned int __memset_start[];
-			extern const unsigned int __memset_end[];
+		const struct exception_table_entry *entry;

+		entry = search_exception_tables(regs->pc);
+		if (entry) {
 #ifdef DEBUG_EXCEPTIONS
-			printk("Exception: PC<%08lx> faddr<%08lx>\n",
-			       regs->pc, address);
-			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-				regs->pc, fixup, g2);
+			printk("Exception: PC<%08lx> faddr<%08lx>\n",
+			       regs->pc, address);
+			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+			       regs->pc, entry->fixup);
 #endif
-			if ((regs->pc >= (unsigned long)__memset_start &&
-			     regs->pc < (unsigned long)__memset_end)) {
-				regs->u_regs[UREG_I4] = address;
-				regs->u_regs[UREG_I5] = regs->pc;
-			}
-			regs->u_regs[UREG_G2] = g2;
-			regs->pc = fixup;
-			regs->npc = regs->pc + 4;
-			return;
-		}
+			regs->pc = entry->fixup;
+			regs->npc = regs->pc + 4;
+			return;
+		}
 	}

 	unhandled_fault(address, tsk, regs);
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
index ce750a99eea9..ee55f1080634 100644
--- a/arch/sparc/mm/mm_32.h
+++ b/arch/sparc/mm/mm_32.h
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault,
 			       int write, unsigned long address);
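
Editorial note on what the deleted code above implemented: the retired sparc32 "range" encoding let two consecutive __ex_table entries cover a whole run of faulting loads/stores. The first entry held the range start with fixup == 0, the second held the range end plus the real fixup address, and the handler reported the index of the faulting instruction back in %g2 so hand-written fixup code could compute how many bytes remained. A condensed C sketch of that retired lookup, reconstructed from the deleted search_extables_range() purely for contrast with the generic scheme (illustrative only):

	/* Old sparc32 lookup: 'entry' may be the first half of a range pair. */
	static unsigned long old_range_fixup(const struct exception_table_entry *entry,
					     unsigned long addr, unsigned long *g2)
	{
		if (!entry->fixup) {			/* range entry: fixup == 0 */
			*g2 = (addr - entry->insn) / 4;	/* index of faulting insn */
			return entry[1].fixup;		/* real fixup in 2nd entry */
		}
		return entry->fixup;			/* ordinary single entry */
	}

With every load/store in copy_user.S, checksum_32.S and memset.S now carrying its own EX()/LD()/ST() annotation and a per-macro fault label (bigchunk_fault and friends), the %g2 side channel and the custom sort_extable()/search_extable()/trim_init_extable() implementations are no longer needed; that is where most of the 642 deleted lines come from.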