author     Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 00:13:02 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 00:13:02 +0200
commit     4cdc4cc2ad35f92338497d53d3e8b7876cf2a51d (patch)
tree       6d603bb48eeed47a4d37547c3a96c79800a2390f /arch/hexagon
parent     Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/a... (diff)
parent     bitops/non-atomic: make @nr unsigned to avoid any DIV (diff)
download   linux-4cdc4cc2ad35f92338497d53d3e8b7876cf2a51d.tar.xz
           linux-4cdc4cc2ad35f92338497d53d3e8b7876cf2a51d.zip
Merge tag 'asm-generic-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic updates from Arnd Bergmann:
 "The main content for 5.15 is a series that cleans up the handling of
  strncpy_from_user() and strnlen_user(), removing a lot of slightly
  incorrect versions of these in favor of the lib/strn*.c helpers that
  implement these correctly and more efficiently.

  The only architectures that retain a private version now are mips,
  ia64, um and parisc. I had offered to convert those as well, but
  Thomas Bogendoerfer wanted to keep the mips version for the moment
  until he had a chance to do regression testing.

  The branch also contains two patches for bitops and for ffs()"

* tag 'asm-generic-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  bitops/non-atomic: make @nr unsigned to avoid any DIV
  asm-generic: ffs: Drop bogus reference to ffz location
  asm-generic: reverse GENERIC_{STRNCPY_FROM,STRNLEN}_USER symbols
  asm-generic: remove extra strn{cpy_from,len}_user declarations
  asm-generic: uaccess: remove inline strncpy_from_user/strnlen_user
  s390: use generic strncpy/strnlen from_user
  microblaze: use generic strncpy/strnlen from_user
  csky: use generic strncpy/strnlen from_user
  arc: use generic strncpy/strnlen from_user
  hexagon: use generic strncpy/strnlen from_user
  h8300: remove stale strncpy_from_user
  asm-generic/uaccess.h: remove __strncpy_from_user/__strnlen_user
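For reference when reading the hexagon diff below: after this series, the architecture relies on the generic helpers in lib/strncpy_from_user.c and lib/strnlen_user.c rather than its own wrapper. The following is a minimal userspace sketch of the caller-visible strncpy_from_user() contract those helpers implement, not the kernel code itself: strncpy_from_user_sketch() is a hypothetical stand-in, "src" is an ordinary buffer rather than a __user pointer, and access faults (reported as -EFAULT in the kernel) are not modelled.

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the strncpy_from_user() return convention:
 *  - length of the copied string, excluding the NUL, if it fits;
 *  - count if the source does not fit (dst is then not NUL-terminated);
 *  - (-EFAULT on a fault in the kernel; not modelled here).
 */
static long strncpy_from_user_sketch(char *dst, const char *src, long count)
{
        size_t len = strnlen(src, count);

        if (len == (size_t)count) {     /* no NUL within count bytes */
                memcpy(dst, src, count);
                return count;           /* truncated copy */
        }
        memcpy(dst, src, len + 1);      /* copy the string and its NUL */
        return (long)len;
}

int main(void)
{
        char buf[8];

        printf("%ld\n", strncpy_from_user_sketch(buf, "hex", sizeof(buf)));          /* 3 */
        printf("%ld\n", strncpy_from_user_sketch(buf, "strnlen_user", sizeof(buf))); /* 8 */
        return 0;
}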
Diffstat (limited to 'arch/hexagon')
-rw-r--r--  arch/hexagon/include/asm/uaccess.h    |  31
-rw-r--r--  arch/hexagon/kernel/hexagon_ksyms.c   |   1
-rw-r--r--  arch/hexagon/mm/Makefile              |   2
-rw-r--r--  arch/hexagon/mm/strnlen_user.S        | 126
4 files changed, 1 insertion(+), 159 deletions(-)
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index c1019a736ff1..ef5bfef8d490 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -57,38 +57,7 @@ unsigned long raw_copy_to_user(void __user *to, const void *from,
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))
-#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
-
-/* get around the ifndef in asm-generic/uaccess.h */
-#define __strnlen_user __strnlen_user
-
-extern long __strnlen_user(const char __user *src, long n);
-
-static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
- long n);
-
#include <asm-generic/uaccess.h>
-/* Todo: an actual accelerated version of this. */
-static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
- long n)
-{
- long res = __strnlen_user(src, n);
-
- if (unlikely(!res))
- return -EFAULT;
-
- if (res > n) {
- long left = raw_copy_from_user(dst, src, n);
- if (unlikely(left))
- memset(dst + (n - left), 0, left);
- return n;
- } else {
- long left = raw_copy_from_user(dst, src, res);
- if (unlikely(left))
- memset(dst + (res - left), 0, left);
- return res-1;
- }
-}
#endif
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 35545a7386a0..ec56ce2d92a2 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -15,7 +15,6 @@ EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
index 893838499591..49911a906fd0 100644
--- a/arch/hexagon/mm/Makefile
+++ b/arch/hexagon/mm/Makefile
@@ -4,4 +4,4 @@
#
obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
-obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
+obj-y += copy_to_user.o copy_from_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/strnlen_user.S b/arch/hexagon/mm/strnlen_user.S
deleted file mode 100644
index 4b5574a7cc9c..000000000000
--- a/arch/hexagon/mm/strnlen_user.S
+++ /dev/null
@@ -1,126 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * User string length functions for kernel
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#define isrc r0
-#define max r1 /* Do not change! */
-
-#define end r2
-#define tmp1 r3
-
-#define obo r6 /* off-by-one */
-#define start r7
-#define mod8 r8
-#define dbuf r15:14
-#define dcmp r13:12
-
-/*
- * The vector mask version of this turned out *really* badly.
- * The hardware loop version also turned out *really* badly.
- * Seems straight pointer arithmetic basically wins here.
- */
-
-#define fname __strnlen_user
-
- .text
- .global fname
- .type fname, @function
- .p2align 5 /* why? */
-fname:
- {
- mod8 = and(isrc,#7);
- end = add(isrc,max);
- start = isrc;
- }
- {
- P0 = cmp.eq(mod8,#0);
- mod8 = and(end,#7);
- dcmp = #0;
- if (P0.new) jump:t dw_loop; /* fire up the oven */
- }
-
-alignment_loop:
-fail_1: {
- tmp1 = memb(start++#1);
- }
- {
- P0 = cmp.eq(tmp1,#0);
- if (P0.new) jump:nt exit_found;
- P1 = cmp.gtu(end,start);
- mod8 = and(start,#7);
- }
- {
- if (!P1) jump exit_error; /* hit the end */
- P0 = cmp.eq(mod8,#0);
- }
- {
- if (!P0) jump alignment_loop;
- }
-
-
-
-dw_loop:
-fail_2: {
- dbuf = memd(start);
- obo = add(start,#1);
- }
- {
- P0 = vcmpb.eq(dbuf,dcmp);
- }
- {
- tmp1 = P0;
- P0 = cmp.gtu(end,start);
- }
- {
- tmp1 = ct0(tmp1);
- mod8 = and(end,#7);
- if (!P0) jump end_check;
- }
- {
- P0 = cmp.eq(tmp1,#32);
- if (!P0.new) jump:nt exit_found;
- if (!P0.new) start = add(obo,tmp1);
- }
- {
- start = add(start,#8);
- jump dw_loop;
- } /* might be nice to combine these jumps... */
-
-
-end_check:
- {
- P0 = cmp.gt(tmp1,mod8);
- if (P0.new) jump:nt exit_error; /* neverfound! */
- start = add(obo,tmp1);
- }
-
-exit_found:
- {
- R0 = sub(start,isrc);
- jumpr R31;
- }
-
-exit_error:
- {
- R0 = add(max,#1);
- jumpr R31;
- }
-
- /* Uh, what does the "fixup" return here? */
- .falign
-fix_1:
- {
- R0 = #0;
- jumpr R31;
- }
-
- .size fname,.-fname
-
-
-.section __ex_table,"a"
-.long fail_1,fix_1
-.long fail_2,fix_1
-.previous
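Closing note on the deleted assembly: its exit_found path returns start - isrc (the length including the byte holding the NUL), exit_error returns max + 1 when no terminator is found within the limit, and the fix_1 fixup returns 0 on a fault. That is essentially the contract the generic lib/strnlen_user.c now provides for hexagon. Below is a rough userspace sketch of that return convention only; strnlen_user_sketch() is a hypothetical stand-in, "src" is a plain buffer rather than a __user pointer, and faults (which yield 0 in the kernel) are not modelled.

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the __strnlen_user()/strnlen_user() return convention:
 *  - string length including the terminating NUL;
 *  - a value larger than n if no NUL is found within n bytes;
 *  - (0 on a fault in the kernel; not modelled here).
 */
static long strnlen_user_sketch(const char *src, long n)
{
        size_t len = strnlen(src, n);

        if (len == (size_t)n)           /* no NUL within n bytes */
                return n + 1;           /* "too long": larger than n */
        return (long)len + 1;           /* length including the NUL */
}

int main(void)
{
        printf("%ld\n", strnlen_user_sketch("hexagon", 16));   /* 8 */
        printf("%ld\n", strnlen_user_sketch("hexagon", 4));    /* 5 */
        return 0;
}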