author     Marco Elver <elver@google.com>          2020-05-21 16:20:39 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2020-06-11 20:04:01 +0200
commit     75d75b7a4d5489cc6a5e91ace306f6c13f376f33
tree       3aa69e5ff92430125ce2678e5dd1a5d4976ca035
parent     kcsan: Restrict supported compilers
kcsan: Support distinguishing volatile accesses
In the kernel, the "volatile" keyword is used in various concurrent
contexts, whether in low-level synchronization primitives or for legacy
reasons.

If supported by the compiler, it will be assumed that aligned volatile
accesses up to sizeof(long long) (matching compiletime_assert_rwonce_type())
are atomic.

Recent versions of Clang [1] (GCC tentative [2]) can instrument volatile
accesses differently. Add the required option to enable the instrumentation,
and provide the necessary runtime functions. None of the updated compilers
are widely available yet (Clang 11 will be the first release to support the
feature).

[1] https://github.com/llvm/llvm-project/commit/5a2c31116f412c3b6888be361137efd705e05814
[2] https://gcc.gnu.org/pipermail/gcc-patches/2020-April/544452.html

This change allows removal of any explicit checks in primitives such as
READ_ONCE() and WRITE_ONCE().

[ bp: Massage commit message a bit. ]

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200521142047.169334-4-elver@google.com
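To illustrate what the new runtime hooks are for (this example is not part of
the patch; the variable names are made up), a marked access such as

	x = READ_ONCE(flag);	/* volatile, 4-byte load, assuming 'flag' is an int */

is, with volatile instrumentation enabled, conceptually compiled as

	__tsan_volatile_read4(&flag);		/* new KCSAN hook for volatile reads */
	x = *(const volatile int *)&flag;	/* the actual load */

instead of going through the plain __tsan_read4() hook, which is how KCSAN can
tell marked (volatile) accesses apart from plain ones without explicit checks
inside READ_ONCE()/WRITE_ONCE().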
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kcsan/core.c | 43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index a73a66cf79df..15f67949d11e 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -790,6 +790,49 @@ void __tsan_write_range(void *ptr, size_t size)
EXPORT_SYMBOL(__tsan_write_range);
/*
+ * Use of explicit volatile is generally disallowed [1]; however, volatile is
+ * still used in various concurrent contexts, whether in low-level
+ * synchronization primitives or for legacy reasons.
+ * [1] https://lwn.net/Articles/233479/
+ *
+ * We only consider volatile accesses atomic if they are aligned and would pass
+ * the size-check of compiletime_assert_rwonce_type().
+ */
+#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
+	void __tsan_volatile_read##size(void *ptr) \
+	{ \
+		const bool is_atomic = size <= sizeof(long long) && \
+				       IS_ALIGNED((unsigned long)ptr, size); \
+		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
+			return; \
+		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+	} \
+	EXPORT_SYMBOL(__tsan_volatile_read##size); \
+	void __tsan_unaligned_volatile_read##size(void *ptr) \
+		__alias(__tsan_volatile_read##size); \
+	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
+	void __tsan_volatile_write##size(void *ptr) \
+	{ \
+		const bool is_atomic = size <= sizeof(long long) && \
+				       IS_ALIGNED((unsigned long)ptr, size); \
+		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
+			return; \
+		check_access(ptr, size, \
+			     KCSAN_ACCESS_WRITE | \
+			     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+	} \
+	EXPORT_SYMBOL(__tsan_volatile_write##size); \
+	void __tsan_unaligned_volatile_write##size(void *ptr) \
+		__alias(__tsan_volatile_write##size); \
+	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
+
+DEFINE_TSAN_VOLATILE_READ_WRITE(1);
+DEFINE_TSAN_VOLATILE_READ_WRITE(2);
+DEFINE_TSAN_VOLATILE_READ_WRITE(4);
+DEFINE_TSAN_VOLATILE_READ_WRITE(8);
+DEFINE_TSAN_VOLATILE_READ_WRITE(16);
+
+/*
* The below are not required by KCSAN, but can still be emitted by the
* compiler.
*/
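
For reference (not part of the diff above), expanding
DEFINE_TSAN_VOLATILE_READ_WRITE(4) yields roughly the following for the read
side; the write side is analogous but additionally passes KCSAN_ACCESS_WRITE
to check_access():

	void __tsan_volatile_read4(void *ptr)
	{
		/* A 4-byte volatile access counts as atomic only if 4-byte aligned. */
		const bool is_atomic = 4 <= sizeof(long long) &&
				       IS_ALIGNED((unsigned long)ptr, 4);

		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)
			return;
		check_access(ptr, 4, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);
	}
	EXPORT_SYMBOL(__tsan_volatile_read4);

	/* The unaligned variant is simply an alias of the aligned entry point. */
	void __tsan_unaligned_volatile_read4(void *ptr)
		__alias(__tsan_volatile_read4);
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read4);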