author    Marco Elver <elver@google.com>  2021-12-04 00:38:17 +0100
committer Paul E. McKenney <paulmck@kernel.org>  2021-12-10 01:42:28 +0100
commit    a70d36e6a0bd867ef42dce8ef46eb9b5a1515fb0 (patch)
tree      1c474bfbcbbd99aac80e7a6ee3d3011d56f9c566 /kernel/kcsan/selftest.c
parent    kcsan: Support WEAK_MEMORY with Clang where no objtool support exists (diff)
download  linux-a70d36e6a0bd867ef42dce8ef46eb9b5a1515fb0.tar.xz
          linux-a70d36e6a0bd867ef42dce8ef46eb9b5a1515fb0.zip
kcsan: Make barrier tests compatible with lockdep
The barrier tests in selftest and the kcsan_test module only need the spinlock
and mutex to test correct barrier instrumentation. Therefore, these were
initially placed on the stack.

However, lockdep asserts that locks are in static storage, and will generate
this warning:

 | INFO: trying to register non-static key.
 | The code is fine but needs lockdep annotation, or maybe
 | you didn't initialize this object before use?
 | turning off the locking correctness validator.
 | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.16.0-rc1+ #3208
 | Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.13.0-1ubuntu1.1 04/01/2014
 | Call Trace:
 |  <TASK>
 |  dump_stack_lvl+0x88/0xd8
 |  dump_stack+0x15/0x1b
 |  register_lock_class+0x6b3/0x840
 |  ...
 |  test_barrier+0x490/0x14c7
 |  kcsan_selftest+0x47/0xa0
 |  ...

To fix, move the test locks into static storage.

Fixing the above also revealed that lock operations are strengthened on first
use with lockdep enabled, due to lockdep calling out into non-instrumented
files (recall that kernel/locking/lockdep.c is not instrumented with KCSAN).

Only kcsan_test checks for over-instrumentation of *_lock() operations, where
we can simply "warm up" the test locks to avoid the test case failing with
lockdep.

Reported-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
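For reference, a minimal sketch of the "warm up" approach described above for the kcsan_test module (that change is not part of the diff below; the mutex and the helper function name are illustrative assumptions, only test_spinlock mirrors the selftest change shown here):

    /* Assumed includes so the sketch is self-contained. */
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    /* Static storage satisfies lockdep's requirement that lock keys be static. */
    static DEFINE_SPINLOCK(test_spinlock);
    static DEFINE_MUTEX(test_mutex);

    /*
     * Hypothetical helper: take and release each test lock once before the
     * barrier checks run. With lockdep enabled, the first lock/unlock is
     * strengthened because lockdep calls into non-instrumented code, so doing
     * it here keeps that one-time effect out of the over-instrumentation
     * checks.
     */
    static void warm_up_test_locks(void)
    {
    	spin_lock(&test_spinlock);
    	spin_unlock(&test_spinlock);
    	mutex_lock(&test_mutex);
    	mutex_unlock(&test_mutex);
    }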
Diffstat (limited to 'kernel/kcsan/selftest.c')
-rw-r--r--  kernel/kcsan/selftest.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 08c6b84b9ebe..b6d4da07d80a 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -113,6 +113,7 @@ static bool __init test_matching_access(void)
* positives: simple test to check at boot certain barriers are always properly
* instrumented. See kcsan_test for a more complete test.
*/
+static DEFINE_SPINLOCK(test_spinlock);
static bool __init test_barrier(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
@@ -122,7 +123,6 @@ static bool __init test_barrier(void)
#endif
bool ret = true;
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
- DEFINE_SPINLOCK(spinlock);
atomic_t dummy;
long test_var;
@@ -172,8 +172,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_READ_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_READ_BARRIER(spin_unlock(&test_spinlock));
KCSAN_CHECK_WRITE_BARRIER(mb());
KCSAN_CHECK_WRITE_BARRIER(wmb());
@@ -202,8 +202,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&test_spinlock));
KCSAN_CHECK_RW_BARRIER(mb());
KCSAN_CHECK_RW_BARRIER(wmb());
@@ -235,8 +235,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_RW_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
kcsan_nestable_atomic_end();