author		Al Viro <viro@zeniv.linux.org.uk>	2017-03-21 02:56:06 +0100
committer	Al Viro <viro@zeniv.linux.org.uk>	2017-03-29 00:22:11 +0200
commit		d597580d373774b1bdab84b3d26ff0b55162b916 (patch)
tree		ebbd7de012fcb731c40c67791e4e5e370844f70c
parent		asm-generic: zero in __get_user(), not __get_user_fn() (diff)
generic ...copy_..._user primitives
provide raw_copy_..._user() and select ARCH_HAS_RAW_COPY_USER to use those.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
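For illustration, a minimal sketch (not part of this patch) of what an opted-in architecture's asm/uaccess.h would supply; __arch_copy_{from,to}_user() are hypothetical low-level helpers standing in for the architecture's real copy loops:

	/*
	 * Hypothetical arch-side sketch: raw_copy_{to,from}_user() copy up
	 * to n bytes with no access_ok(), zero-padding, KASAN or object
	 * size checks, and return the number of bytes left uncopied
	 * (0 on success).
	 */
	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		return __arch_copy_from_user(to, from, n);	/* hypothetical */
	}

	static inline unsigned long
	raw_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		return __arch_copy_to_user(to, from, n);	/* hypothetical */
	}

	/* optional: ask linux/uaccess.h to inline the checking wrappers */
	#define INLINE_COPY_FROM_USER
	#define INLINE_COPY_TO_USER

plus a "select ARCH_HAS_RAW_COPY_USER" in the architecture's Kconfig entry.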
-rw-r--r--	arch/Kconfig	3
-rw-r--r--	include/asm-generic/uaccess.h	11
-rw-r--r--	include/linux/uaccess.h	187
-rw-r--r--	lib/Makefile	2
-rw-r--r--	lib/usercopy.c	26
5 files changed, 229 insertions, 0 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index cd211a14a88f..315d37626ddc 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -847,4 +847,7 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
bool
+config ARCH_HAS_RAW_COPY_USER
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index bd7a05e9582b..d65c311eb128 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -86,7 +86,11 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
+#ifdef CONFIG_ARCH_HAS_RAW_COPY_USER
+ return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+#else
return unlikely(__copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+#endif
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
@@ -147,7 +151,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
+#ifdef CONFIG_ARCH_HAS_RAW_COPY_USER
+ return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+#else
return unlikely(__copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+#endif
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -156,6 +164,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
+#ifndef CONFIG_ARCH_HAS_RAW_COPY_USER
+
#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif
@@ -185,6 +195,7 @@ static inline long copy_to_user(void __user *to,
else
return n;
}
+#endif
/*
* Copy a null terminated string from userspace.
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9c3ae8706e9d..5f76bc995d96 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -3,6 +3,7 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -11,6 +12,192 @@
#include <asm/uaccess.h>
+#ifdef CONFIG_ARCH_HAS_RAW_COPY_USER
+/*
+ * Architectures should provide two primitives (raw_copy_{to,from}_user()),
+ * select ARCH_HAS_RAW_COPY_USER and get rid of their private instances
+ * of copy_{to,from}_user() and __copy_{to,from}_user{,_inatomic}(). Once
+ * all of them switch, this part of linux/uaccess.h will become unconditional.
+ *
+ * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
+ * return the amount left to copy. They should assume that access_ok() has
+ * already been checked (and succeeded); they should *not* zero-pad anything.
+ * No KASAN or object size checks either - those belong here.
+ *
+ * Both of these functions should attempt to copy size bytes starting at from
+ * into the area starting at to. They must not fetch or store anything
+ * outside of those areas. Return value must be between 0 (everything
+ * copied successfully) and size (nothing copied).
+ *
+ * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
+ * at to must become equal to the bytes fetched from the corresponding area
+ * starting at from. All data past to + size - N must be left unmodified.
+ *
+ * If copying succeeds, the return value must be 0. If some data cannot be
+ * fetched, it is permitted to copy less than had been fetched; the only
+ * hard requirement is that not storing anything at all (i.e. returning size)
+ * should happen only when nothing could be copied. In other words, you don't
+ * have to squeeze as much as possible - it is allowed, but not necessary.
+ *
+ * For raw_copy_from_user(), to always points to kernel memory and no faults
+ * on store should happen. Interpretation of from is affected by set_fs().
+ * For raw_copy_to_user() it's the other way round.
+ *
+ * Both can be inlined - it's up to the architecture whether it wants to bother
+ * with that. They should not be used directly; they are used to implement
+ * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
+ * that are used instead. Out of those, __... ones are inlined. Plain
+ * copy_{to,from}_user() might or might not be inlined. If you want them
+ * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ *
+ * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
+ * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
+ * at all; their callers absolutely must check the return value.
+ *
+ * Biarch ones should also provide raw_copy_in_user() - similar to the above,
+ * but both source and destination are __user pointers (affected by set_fs()
+ * as usual) and both source and destination can trigger faults.
+ */
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure the user space address is pinned so
+ * that the copy does not page-fault and sleep.
+ */
+static __always_inline unsigned long
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+#ifdef INLINE_COPY_FROM_USER
+static inline unsigned long
+_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+#else
+extern unsigned long
+_copy_from_user(void *, const void __user *, unsigned long);
+#endif
+
+#ifdef INLINE_COPY_TO_USER
+static inline unsigned long
+_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+#else
+extern unsigned long
+_copy_to_user(void __user *, const void *, unsigned long);
+#endif
+
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+ kasan_check_write(to, n);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ n = _copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ kasan_check_read(from, n);
+ might_fault();
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = _copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+#ifdef CONFIG_COMPAT
+static __always_inline unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ return raw_copy_in_user(to, from, n);
+}
+static __always_inline unsigned long __must_check
+copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ n = raw_copy_in_user(to, from, n);
+ return n;
+}
+#endif
+#endif
+
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
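
A caller-side sketch of why the zero-padding NOTE above matters (hypothetical; example_read_args() and uptr are made up, not part of this patch):

	#include <linux/uaccess.h>

	static int example_read_args(const void __user *uptr)
	{
		char buf[64];

		if (copy_from_user(buf, uptr, sizeof(buf)))
			return -EFAULT;
		/*
		 * Even if the error were ignored, buf[] would hold no stale
		 * kernel data: _copy_from_user() memset()s the uncopied
		 * tail.  With __copy_from_user() that guarantee is gone, so
		 * its return value must always be checked.
		 */
		return 0;
	}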
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a8725..7d875c389172 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -242,3 +242,5 @@ UBSAN_SANITIZE_ubsan.o := n
obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
+
+obj-$(CONFIG_ARCH_HAS_RAW_COPY_USER) += usercopy.o
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..1b6010a3beb8
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
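
As a closing usage sketch (hypothetical; example_copy_out() and its parameters are made up), the return-value contract implemented above - copy_to_user() returns the number of bytes it could not store, and returns the full size only when nothing was copied - is what lets read(2)-style code report partial copies:

	#include <linux/uaccess.h>

	static ssize_t example_copy_out(char __user *ubuf, size_t count,
					const char *kbuf, size_t avail)
	{
		size_t n = count < avail ? count : avail;
		unsigned long left;

		left = copy_to_user(ubuf, kbuf, n);	/* bytes NOT copied */
		if (left == n)
			return -EFAULT;			/* nothing was stored */
		return n - left;			/* report the partial copy */
	}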