path: root/kernel/kcsan/debugfs.c
author		Marco Elver <elver@google.com>	2020-02-11 17:04:23 +0100
committer	Ingo Molnar <mingo@kernel.org>	2020-03-21 09:44:14 +0100
commit		703b321501c95c658275fd76775282fe45989641 (patch)
tree		fa43016074c8faca188a5e2baf186bc9eaf8ad6f /kernel/kcsan/debugfs.c
parent		kcsan: Add kcsan_set_access_mask() support (diff)
kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask)
This introduces ASSERT_EXCLUSIVE_BITS(var, mask).
ASSERT_EXCLUSIVE_BITS(var, mask) will cause KCSAN to assume that the
following access is safe w.r.t. data races (however, please see the
docbook comment for disclaimer here).

For more context on why this was considered necessary, please see:
  http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw

In particular, before this patch, data races between reads (that use
@mask bits of an access that should not be modified concurrently) and
writes (that change ~@mask bits not used by the readers) would have
been annotated with "data_race()" (or "READ_ONCE()"). However, doing
so would then hide real problems: we would no longer be able to detect
harmful races between reads to @mask bits and writes to @mask bits.

Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish:

  1. Avoid proliferation of specific macros at the call sites: by
     including a single mask in the argument list, we can use the same
     macro in a wide variety of call sites, regardless of how and which
     bits in a field each call site actually accesses.

  2. The existing code does not need to be modified (although READ_ONCE()
     may still be advisable if we cannot prove that the data race is
     always safe).

  3. We catch bugs where the exclusive bits are modified concurrently.

  4. We document properties of the current code.

Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Qian Cai <cai@lca.pw>
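For illustration, a minimal sketch of the call-site pattern described above (all identifiers here, such as shared_flags, READER_BITS and the two helper functions, are hypothetical and not taken from this patch): a reader asserts that the bits it actually depends on are never written concurrently, while a writer that only changes the remaining bits does not trigger a report.

	/*
	 * Illustrative sketch only, not part of the patch: a reader that
	 * relies on READER_BITS of a shared word, and a writer that only
	 * touches the other bits.
	 */
	#include <linux/compiler.h>
	#include <linux/kcsan-checks.h>

	#define READER_BITS	0x00ffUL	/* bits the reader relies on (hypothetical) */

	static unsigned long shared_flags;	/* hypothetical shared state */

	static unsigned long read_reader_bits(void)
	{
		/*
		 * Assert that no thread concurrently writes READER_BITS.
		 * Concurrent writes to the remaining (~READER_BITS) bits are
		 * not reported for the access below, whereas a concurrent
		 * write to READER_BITS still is.
		 */
		ASSERT_EXCLUSIVE_BITS(shared_flags, READER_BITS);
		return READ_ONCE(shared_flags) & READER_BITS;
	}

	static void flip_other_bits(unsigned long bits)
	{
		/* Only changes bits outside READER_BITS; tolerated by the reader. */
		WRITE_ONCE(shared_flags,
			   READ_ONCE(shared_flags) ^ (bits & ~READER_BITS));
	}

As noted in the commit message, this keeps the bug-catching ability for the @mask bits that a plain data_race() or READ_ONCE() annotation would have given up.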
Diffstat (limited to 'kernel/kcsan/debugfs.c')
-rw-r--r--	kernel/kcsan/debugfs.c	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 9bbba0e57c9b..2ff196123977 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters)
  * debugfs file from multiple tasks to generate real conflicts and show reports.
  */
 static long test_dummy;
+static long test_flags;
 static noinline void test_thread(unsigned long iters)
 {
+	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
 	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
 	cycles_t cycles;
 
@@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters)
 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
 
 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+	pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
 
 	cycles = get_cycles();
 	while (iters--) {
+		/* These all should generate reports. */
 		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
-		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
 		ASSERT_EXCLUSIVE_WRITER(test_dummy);
 		ASSERT_EXCLUSIVE_ACCESS(test_dummy);
 
+		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
 		/* not actually instrumented */
 		WRITE_ONCE(test_dummy, iters); /* to observe value-change */
+		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
+
+		test_flags ^= CHANGE_BITS; /* generate value-change */
+		__kcsan_check_write(&test_flags, sizeof(test_flags));
 	}
 	cycles = get_cycles() - cycles;
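A brief note on the expectations documented in the hunk above: test_thread() is meant to be run concurrently from multiple tasks writing to the debugfs file (per the comment above test_dummy), and every instance flips exactly CHANGE_BITS via test_flags ^= CHANGE_BITS. Hence ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS) is expected to report, while ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS) is not, since the ~CHANGE_BITS bits are never modified. The __kcsan_check_read() calls immediately following either assertion are also expected to stay silent because, as the commit message states, KCSAN assumes the access following the assertion is safe w.r.t. data races.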