field | value | date
---|---|---
author | Christophe Leroy <christophe.leroy@csgroup.eu> | 2021-03-10 18:46:49 +0100
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-04-03 12:22:02 +0200
commit | 9975f852ce1bf041a1a81bf882e29ee7a3b78ca6 (patch) |
tree | e43c7690c69823b263b4d58f17b298ad2a065792 /arch/powerpc/include/asm/uaccess.h |
parent | powerpc/uaccess: Remove __chk_user_ptr() in __get/put_user (diff) |
powerpc/uaccess: Remove calls to __get_user_bad() and __put_user_bad()
__get_user_bad() and __put_user_bad() are functions that are
declared but never defined, so that any call to them makes the
link fail.
Nowadays, we have BUILD_BUG() and BUILD_BUG_ON() for that, and
they have the advantage of breaking the build earlier, at
compile time instead of link time.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d7d839e994f49fae4ff7b70fac72bd951272436b.1615398265.git.christophe.leroy@csgroup.eu
Diffstat (limited to 'arch/powerpc/include/asm/uaccess.h')
-rw-r--r-- | arch/powerpc/include/asm/uaccess.h | 11 |
1 file changed, 3 insertions, 8 deletions
```diff
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 605fa79a6e6f..47a454d19351 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -53,8 +53,6 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
 #define __put_user(x, ptr) \
 	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-extern long __put_user_bad(void);
-
 #define __put_user_size(x, ptr, size, retval)			\
 do {								\
 	__label__ __pu_failed;					\
@@ -136,12 +134,10 @@ do {								\
 	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;	\
 	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;	\
 	case 8: __put_user_asm2_goto(x, __pus_addr, label); break;		\
-	default: __put_user_bad();						\
+	default: BUILD_BUG();							\
 	}									\
 } while (0)
 
-extern long __get_user_bad(void);
-
 /*
  * This does an atomic 128 byte aligned load from userspace.
  * Upto caller to do enable_kernel_vmx() before calling!
@@ -196,14 +192,13 @@ extern long __get_user_bad(void);
 #define __get_user_size_allowed(x, ptr, size, retval)		\
 do {								\
 	retval = 0;						\
-	if (size > sizeof(x))					\
-		(x) = __get_user_bad();				\
+	BUILD_BUG_ON(size > sizeof(x));				\
 	switch (size) {						\
 	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
 	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
 	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
 	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break;		\
-	default: (x) = __get_user_bad();			\
+	default: BUILD_BUG();					\
 	}							\
 } while (0)
 
```
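For readers unfamiliar with the trick being removed, here is a minimal sketch in plain userspace C (not kernel code; the names demo_get(), demo_bad_size() and DEMO_BUILD_BUG_ON() are made up for illustration) contrasting the two failure modes. Build with optimization enabled (e.g. -O2), as the kernel does, so the dead default branch is eliminated for valid sizes.

```c
#include <stdio.h>

/*
 * Old trick: declared but never defined. A call that survives dead-code
 * elimination is only caught by the linker ("undefined reference").
 */
extern long demo_bad_size(void);

/*
 * New trick: a negative-size array type is rejected by the compiler
 * itself, so misuse is reported at compile time, before linking.
 */
#define DEMO_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define demo_get(x, size)						\
do {									\
	DEMO_BUILD_BUG_ON((size) > sizeof(x));	/* compile-time check */ \
	switch (size) {							\
	case 1: case 2: case 4: case 8:					\
		(x) = 0;	/* stand-in for the real asm accessors */ \
		break;							\
	default:							\
		(x) = demo_bad_size();	/* old style: link-time check */ \
	}								\
} while (0)

int main(void)
{
	long v;

	demo_get(v, sizeof(v));		/* fine: constant, supported size */
	/* demo_get(v, 16); */		/* uncommenting breaks the build  */
	printf("%ld\n", v);
	return 0;
}
```

The practical difference the commit message points at: with the BUILD_BUG()-style check the error is reported by the compiler at the offending file and line, whereas the undefined-function trick only surfaces much later, as a link error with no direct pointer to the bad call site.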