Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/checksum.h   |   4
-rw-r--r--  arch/mips/include/asm/fpu.h        |   1
-rw-r--r--  arch/mips/include/asm/pgtable-32.h |   1
-rw-r--r--  arch/mips/include/asm/pgtable-64.h |   1
-rw-r--r--  arch/mips/include/asm/r4kcache.h   |   4
-rw-r--r--  arch/mips/include/asm/uaccess.h    | 449
6 files changed, 67 insertions, 393 deletions
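
Most of the change below is mechanical: open-coded address-limit checks are replaced by the generic uaccess_kernel() helper. For reference, kernels of this era define that helper in include/linux/uaccess.h roughly as follows (a sketch for orientation, not part of this patch; the exact definition varies by kernel version):

	/* True when the thread's address limit covers kernel space,
	 * i.e. a set_fs(KERNEL_DS) section is active, so "user"
	 * pointers may actually point into the kernel. */
	#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

Since get_ds() is KERNEL_DS on MIPS, segment_eq(get_fs(), get_ds()) maps to uaccess_kernel(), and segment_eq(get_fs(), USER_DS) maps to !uaccess_kernel().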
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index c8b574f7e0cc..77cad232a1c6 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 				   __wsum sum, int *err_ptr)
 {
 	might_fault();
-	if (segment_eq(get_fs(), get_ds()))
+	if (uaccess_kernel())
 		return __csum_partial_copy_kernel((__force void *)src, dst,
 						  len, sum, err_ptr);
 	else
@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 {
 	might_fault();
 	if (access_ok(VERIFY_WRITE, dst, len)) {
-		if (segment_eq(get_fs(), get_ds()))
+		if (uaccess_kernel())
 			return __csum_partial_copy_kernel(src,
 							  (__force void *)dst,
 							  len, sum, err_ptr);
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 321752bcbab6..f94455f964ec 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -12,6 +12,7 @@
 
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
 #include <linux/thread_info.h>
 #include <linux/bitops.h>
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..6f94bed571c4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,6 +16,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 extern int temp_tlb_entry;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 514cbc0a6a67..130a2a6c1531 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,6 +17,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
 #include <asm-generic/pgtable-nopmd.h>
 #else
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 55fd94e6cd0b..7f12d7e27c94 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,7 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
-#include <linux/uaccess.h> /* for segment_eq() */
+#include <linux/uaccess.h> /* for uaccess_kernel() */
 
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
 									\
 	__##pfx##flush_prologue						\
 									\
-	if (segment_eq(get_fs(), USER_DS)) {				\
+	if (!uaccess_kernel()) {					\
 		while (1) {						\
 			protected_cachee_op(hitop, addr);		\
 			if (addr == aend)				\
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5347cfe15af2..99e629a590a5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -12,8 +12,6 @@
 #define _ASM_UACCESS_H
 
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm-eva.h>
 #include <asm/extable.h>
@@ -71,9 +69,6 @@ extern u64 __ua_limit;
 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
 #endif
 
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void)
 	if (!IS_ENABLED(CONFIG_EVA))
 		return false;
 
-	return segment_eq(get_fs(), get_ds());
+	return uaccess_kernel();
 }
 
 /*
@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void)
  * this function, memory access functions may still return -EFAULT.
  */
 
-#define __access_mask get_fs().seg
-
-#define __access_ok(addr, size, mask) \
-({ \
-	unsigned long __addr = (unsigned long) (addr); \
-	unsigned long __size = size; \
-	unsigned long __mask = mask; \
-	unsigned long __ok; \
- \
-	__chk_user_ptr(addr); \
-	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
-		__ua_size(__size))); \
-	__ok == 0; \
-})
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+	unsigned long addr = (unsigned long)p;
+	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+}
 
 #define access_ok(type, addr, size) \
-	likely(__access_ok((addr), (size), __access_mask))
+	likely(__access_ok((addr), (size)))
 
 /*
  * put_user: - Write a simple value into user space.
@@ -811,157 +797,7 @@ extern void __put_user_unaligned_unknown(void);
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
-#ifndef CONFIG_EVA
-#define __invoke_copy_to_user(to, from, n) \
-({ \
-	register void __user *__cu_to_r __asm__("$4"); \
-	register const void *__cu_from_r __asm__("$5"); \
-	register long __cu_len_r __asm__("$6"); \
- \
-	__cu_to_r = (to); \
-	__cu_from_r = (from); \
-	__cu_len_r = (n); \
-	__asm__ __volatile__( \
-	__MODULE_JAL(__copy_user) \
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
-	: \
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
-	  DADDI_SCRATCH, "memory"); \
-	__cu_len_r; \
-})
-
-#define __invoke_copy_to_kernel(to, from, n) \
-	__invoke_copy_to_user(to, from, n)
-
-#endif
-
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_from, __cu_len, true); \
-	might_fault(); \
- \
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
-#define __copy_to_user_inatomic(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_from, __cu_len, true); \
- \
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-
-#define __copy_from_user_inatomic(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_to, __cu_len, false); \
- \
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
-							      __cu_from,\
-							      __cu_len);\
-	else \
-		__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
-							    __cu_from, \
-							    __cu_len); \
-	__cu_len; \
-})
-
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_from, __cu_len, true); \
- \
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, \
-						   __cu_from, \
-						   __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
-			might_fault(); \
-			__cu_len = __invoke_copy_to_user(__cu_to, \
-							 __cu_from, \
-							 __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
-
-#ifndef CONFIG_EVA
-
-#define __invoke_copy_from_user(to, from, n) \
+#define __invoke_copy_from(func, to, from, n) \
 ({ \
 	register void *__cu_to_r __asm__("$4"); \
 	register const void __user *__cu_from_r __asm__("$5"); \
 	register long __cu_len_r __asm__("$6"); \
@@ -972,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r = (n); \
 	__asm__ __volatile__( \
 	".set\tnoreorder\n\t" \
-	__MODULE_JAL(__copy_user) \
+	__MODULE_JAL(func) \
 	".set\tnoat\n\t" \
 	__UA_ADDU "\t$1, %1, %2\n\t" \
 	".set\tat\n\t" \
@@ -984,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
-#define __invoke_copy_from_kernel(to, from, n) \
-	__invoke_copy_from_user(to, from, n)
-
-/* For userland <-> userland operations */
-#define ___invoke_copy_in_user(to, from, n) \
-	__invoke_copy_from_user(to, from, n)
-
-/* For kernel <-> kernel operations */
-#define ___invoke_copy_in_kernel(to, from, n) \
-	__invoke_copy_from_user(to, from, n)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_to(func, to, from, n) \
 ({ \
-	register void *__cu_to_r __asm__("$4"); \
-	register const void __user *__cu_from_r __asm__("$5"); \
+	register void __user *__cu_to_r __asm__("$4"); \
+	register const void *__cu_from_r __asm__("$5"); \
 	register long __cu_len_r __asm__("$6"); \
 \
 	__cu_to_r = (to); \
 	__cu_from_r = (from); \
 	__cu_len_r = (n); \
 	__asm__ __volatile__( \
-	".set\tnoreorder\n\t" \
-	__MODULE_JAL(__copy_user_inatomic) \
-	".set\tnoat\n\t" \
-	__UA_ADDU "\t$1, %1, %2\n\t" \
-	".set\tat\n\t" \
-	".set\treorder" \
+	__MODULE_JAL(func) \
 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 	: \
 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
@@ -1018,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
-	__invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_from_kernel(to, from, n) \
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+	__invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#ifndef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n) \
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n) \
+	__invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n) \
+	__invoke_copy_from(__copy_user, to, from, n)
 
 #else
 
 /* EVA specific functions */
 
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
 				   size_t __n);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
 				 size_t __n);
 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 
-#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
-({ \
-	register void *__cu_to_r __asm__("$4"); \
-	register const void __user *__cu_from_r __asm__("$5"); \
-	register long __cu_len_r __asm__("$6"); \
- \
-	__cu_to_r = (to); \
-	__cu_from_r = (from); \
-	__cu_len_r = (n); \
-	__asm__ __volatile__( \
-	".set\tnoreorder\n\t" \
-	__MODULE_JAL(func_ptr) \
-	".set\tnoat\n\t" \
-	__UA_ADDU "\t$1, %1, %2\n\t" \
-	".set\tat\n\t" \
-	".set\treorder" \
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
-	: \
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
-	  DADDI_SCRATCH, "memory"); \
-	__cu_len_r; \
-})
-
-#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
-({ \
-	register void *__cu_to_r __asm__("$4"); \
-	register const void __user *__cu_from_r __asm__("$5"); \
-	register long __cu_len_r __asm__("$6"); \
- \
-	__cu_to_r = (to); \
-	__cu_from_r = (from); \
-	__cu_len_r = (n); \
-	__asm__ __volatile__( \
-	__MODULE_JAL(func_ptr) \
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
-	: \
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
-	  DADDI_SCRATCH, "memory"); \
-	__cu_len_r; \
-})
-
 /*
  * Source or destination address is in userland. We need to go through
  * the TLB
  */
 #define __invoke_copy_from_user(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, \
-					    __copy_user_inatomic_eva)
+	__invoke_copy_from(__copy_from_user_eva, to, from, n)
 
 #define __invoke_copy_to_user(to, from, n) \
-	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+	__invoke_copy_to(__copy_to_user_eva, to, from, n)
 
 #define ___invoke_copy_in_user(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
-
-/*
- * Source or destination address in the kernel. We are not going through
- * the TLB
- */
-#define __invoke_copy_from_kernel(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
-
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
-
-#define __invoke_copy_to_kernel(to, from, n) \
-	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+	__invoke_copy_from(__copy_in_user_eva, to, from, n)
 
 #endif /* CONFIG_EVA */
 
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_to, __cu_len, false); \
- \
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	} \
-	__cu_len; \
-})
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_to_kernel(to, from, n);
+	else
+		return __invoke_copy_to_user(to, from, n);
+}
 
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
- \
-	check_object_size(__cu_to, __cu_len, false); \
- \
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
-			might_fault(); \
-			__cu_len = __invoke_copy_from_user(__cu_to, \
-							   __cu_from, \
-							   __cu_len); \
-		} else { \
-			memset(__cu_to, 0, __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_from_kernel(to, from, n);
+	else
+		return __invoke_copy_from_user(to, from, n);
+}
 
-#define __copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
-						    __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
-						  __cu_len); \
-	} \
-	__cu_len; \
-})
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
-#define copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
- \
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
-						    __cu_len); \
-	} else { \
-		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
-			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
-			might_fault(); \
-			__cu_len = ___invoke_copy_in_user(__cu_to, \
-							  __cu_from, \
-							  __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
+static inline unsigned long
+raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return ___invoke_copy_in_kernel(to, from, n);
+	else
+		return ___invoke_copy_in_user(to, from, n);
+}
 
 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
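
With raw_copy_to_user(), raw_copy_from_user() and raw_copy_in_user() supplied (and INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER requesting inline wrappers), the architecture no longer implements copy_to_user()/copy_from_user() itself: the generic wrappers in include/linux/uaccess.h layer the access_ok() check, the might_fault() annotation and the zero-padding of any uncopied tail on top of the raw primitives. A simplified sketch of the generic from-user wrapper, for orientation only (not part of this patch; see include/linux/uaccess.h for the authoritative version):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;	/* bytes not yet copied */

		might_fault();
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))	/* faulted: zero the uncopied tail */
			memset(to + (n - res), 0, res);
		return res;
	}

The caller-visible contract is unchanged: the return value is the number of bytes that could not be copied, zero on success.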