author     Marco Elver <elver@google.com>    2020-02-04 18:21:10 +0100
committer  Ingo Molnar <mingo@kernel.org>    2020-03-21 09:42:18 +0100
commit     1e6ee2f0fe8ae682757960edf455e99f611268a0 (patch)
tree       a7325816ac12d8f67d39f356aad32395e8344175 /kernel/kcsan
parent     kcsan: Add docbook header for data_race() (diff)
kcsan: Add option to assume plain aligned writes up to word size are atomic
This adds the option KCSAN_ASSUME_PLAIN_WRITES_ATOMIC. If enabled, plain
aligned writes up to word size are assumed to be atomic, and are also
assumed not to be subject to unsafe compiler optimizations that would
result in data races. The option is enabled by default to reflect
current kernel-wide preferences.
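
For a rough sense of what the new check accepts, the predicate the option
enables reduces to the following stand-alone sketch (not the kernel code;
the flag value and the helper name are assumptions for illustration):

/*
 * Sketch of the assumption this option enables: a plain write is
 * treated as atomic iff it is a write, at most word-sized, and
 * naturally aligned. KCSAN_ACCESS_WRITE's value here is assumed
 * for the sketch, not taken from the kernel headers.
 */
#include <stdbool.h>
#include <stddef.h>

#define KCSAN_ACCESS_WRITE 0x1 /* assumed flag value */

static bool plain_write_assumed_atomic(const volatile void *ptr,
                                       size_t size, int type)
{
	return (type & KCSAN_ACCESS_WRITE) != 0 &&
	       size <= sizeof(long) &&                 /* at most word size */
	       ((unsigned long)ptr & (size - 1)) == 0; /* naturally aligned */
}

The alignment test mirrors IS_ALIGNED(), which is meaningful for
power-of-two sizes (the common 1/2/4/8-byte accesses).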
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/kcsan')
 kernel/kcsan/core.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 64b30f7716a1..e3c7d8f34f2f 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/random.h>
@@ -169,10 +170,20 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
-static __always_inline bool is_atomic(const volatile void *ptr)
+static __always_inline bool
+is_atomic(const volatile void *ptr, size_t size, int type)
 {
-	struct kcsan_ctx *ctx = get_ctx();
+	struct kcsan_ctx *ctx;
 
+	if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+		return true;
+
+	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
+	    (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+	    IS_ALIGNED((unsigned long)ptr, size))
+		return true; /* Assume aligned writes up to word size are atomic. */
+
+	ctx = get_ctx();
 	if (unlikely(ctx->atomic_next > 0)) {
 		/*
 		 * Because we do not have separate contexts for nested
@@ -193,7 +204,8 @@ static __always_inline bool is_atomic(const volatile void *ptr)
 	return kcsan_is_atomic(ptr);
 }
 
-static __always_inline bool should_watch(const volatile void *ptr, int type)
+static __always_inline bool
+should_watch(const volatile void *ptr, size_t size, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -202,7 +214,7 @@ static __always_inline bool should_watch(const volatile void *ptr, int type)
 	 * should not count towards skipped instructions, and (2) to actually
 	 * decrement kcsan_atomic_next for consecutive instruction stream.
 	 */
-	if ((type & KCSAN_ACCESS_ATOMIC) != 0 || is_atomic(ptr))
+	if (is_atomic(ptr, size, type))
 		return false;
 
 	if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -460,7 +472,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 
 	if (unlikely(watchpoint != NULL))
 		kcsan_found_watchpoint(ptr, size, type, watchpoint,
 				       encoded_watchpoint);
-	else if (unlikely(should_watch(ptr, type)))
+	else if (unlikely(should_watch(ptr, size, type)))
 		kcsan_setup_watchpoint(ptr, size, type);
 }
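
To make the behavior change concrete, here is a hypothetical instrumented
writer (not from the patch): with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y,
the first store no longer sets up a watchpoint, while the second remains
eligible because it is wider than a word. (The Kconfig entry itself is
presumably added outside kernel/kcsan, which is why it does not appear in
this diffstat.)

struct pair { long a, b; };

long flag;      /* naturally aligned, word-sized   */
struct pair p;  /* sizeof(struct pair) > word size */

void writer(void)
{
	flag = 1;                /* plain aligned word-size write:
	                          * is_atomic() now returns true, so
	                          * should_watch() bails out early   */
	p = (struct pair){1, 2}; /* larger than sizeof(long):
	                          * still eligible for a watchpoint  */
}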