author     Marco Elver <elver@google.com>         2020-03-25 17:41:56 +0100
committer  Paul E. McKenney <paulmck@kernel.org>  2020-04-14 02:18:11 +0200
commit     757a4cefde76697af2b2c284c8a320912b77e7e6 (patch)
tree       977a8e0f86eb623366b89e15d298aa492dd28f46 /kernel/kcsan
parent     kcsan: Avoid blocking producers in prepare_report() (diff)
kcsan: Add support for scoped accesses
This adds support for scoped accesses, where the memory range is checked for
the duration of the scope. The feature is implemented by inserting the relevant
access information into a list of scoped accesses for the current execution
context, which are then checked (until removed) on every call (through
instrumentation) into the KCSAN runtime.

An alternative, more complex implementation could set up a watchpoint for the
scoped access and keep the watchpoint set up. This, however, would require
first exposing a handle to the watchpoint, as well as dealing with cases such
as accesses by the same thread while the watchpoint is still set up (and
several more cases). It is also doubtful that this would provide any benefit,
since the majority of the delay while the watchpoint is set up is likely due
to the delays injected by KCSAN. Therefore, the implementation in this patch
is simpler and avoids hurting KCSAN's main use case (normal data race
detection); it also implicitly improves scoped-access race detection due to
the increased probability of setting up watchpoints by repeatedly calling
__kcsan_check_access() throughout the scope of the access.

The implementation required adding an additional conditional branch to the
fast-path. However, the microbenchmark showed a *speedup* of ~5% on the
fast-path. This appears to be due to subtly improved codegen by GCC from
moving get_ctx() and the associated load of preempt_count earlier.

Suggested-by: Boqun Feng <boqun.feng@gmail.com>
Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
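[Editor's note: a minimal usage sketch of the interface added by this patch,
kcsan_begin_scoped_access()/kcsan_end_scoped_access(). The declarations are
assumed to live in include/linux/kcsan-checks.h (outside this diffstat, which
is limited to kernel/kcsan), and 'shared_state' is a made-up variable for
illustration; the flag combination corresponds to the "assert no writes
(scoped)" report string added in report.c below.]

    #include <linux/kcsan-checks.h>

    static int shared_state;    /* hypothetical shared variable */

    static void example_scoped_assert(void)
    {
            struct kcsan_scoped_access sa;

            /*
             * Assert that no other thread writes to shared_state for the
             * duration of the scope; the range is re-checked on every
             * instrumented access until kcsan_end_scoped_access().
             */
            kcsan_begin_scoped_access(&shared_state, sizeof(shared_state),
                                      KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT,
                                      &sa);

            /* ... code that must not race with writers to shared_state ... */

            kcsan_end_scoped_access(&sa);
    }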
Diffstat (limited to 'kernel/kcsan')
-rw-r--r--  kernel/kcsan/core.c   | 83
-rw-r--r--  kernel/kcsan/report.c | 33
2 files changed, 97 insertions(+), 19 deletions(-)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 4d8ea0fca5f1..a572aae61b98 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -6,6 +6,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
@@ -42,6 +43,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
.atomic_nest_count = 0,
.in_flat_atomic = false,
.access_mask = 0,
+ .scoped_accesses = {LIST_POISON1, NULL},
};
/*
@@ -191,12 +193,23 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
+/* Check scoped accesses; never inline because this is a slow-path! */
+static noinline void kcsan_check_scoped_accesses(void)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+ struct list_head *prev_save = ctx->scoped_accesses.prev;
+ struct kcsan_scoped_access *scoped_access;
+
+ ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
+ list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
+ __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+ ctx->scoped_accesses.prev = prev_save;
+}
+
/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
-is_atomic(const volatile void *ptr, size_t size, int type)
+is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
- struct kcsan_ctx *ctx;
-
if (type & KCSAN_ACCESS_ATOMIC)
return true;
@@ -213,7 +226,6 @@ is_atomic(const volatile void *ptr, size_t size, int type)
IS_ALIGNED((unsigned long)ptr, size))
return true; /* Assume aligned writes up to word size are atomic. */
- ctx = get_ctx();
if (ctx->atomic_next > 0) {
/*
* Because we do not have separate contexts for nested
@@ -233,7 +245,7 @@ is_atomic(const volatile void *ptr, size_t size, int type)
}
static __always_inline bool
-should_watch(const volatile void *ptr, size_t size, int type)
+should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
/*
* Never set up watchpoints when memory operations are atomic.
@@ -242,7 +254,7 @@ should_watch(const volatile void *ptr, size_t size, int type)
* should not count towards skipped instructions, and (2) to actually
* decrement kcsan_atomic_next for consecutive instruction stream.
*/
- if (is_atomic(ptr, size, type))
+ if (is_atomic(ptr, size, type, ctx))
return false;
if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -563,8 +575,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
if (unlikely(watchpoint != NULL))
kcsan_found_watchpoint(ptr, size, type, watchpoint,
encoded_watchpoint);
- else if (unlikely(should_watch(ptr, size, type)))
- kcsan_setup_watchpoint(ptr, size, type);
+ else {
+ struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
+
+ if (unlikely(should_watch(ptr, size, type, ctx)))
+ kcsan_setup_watchpoint(ptr, size, type);
+ else if (unlikely(ctx->scoped_accesses.prev))
+ kcsan_check_scoped_accesses();
+ }
}
/* === Public interface ===================================================== */
@@ -660,6 +678,55 @@ void kcsan_set_access_mask(unsigned long mask)
}
EXPORT_SYMBOL(kcsan_set_access_mask);
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+
+ __kcsan_check_access(ptr, size, type);
+
+ ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
+
+ INIT_LIST_HEAD(&sa->list);
+ sa->ptr = ptr;
+ sa->size = size;
+ sa->type = type;
+
+ if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
+ INIT_LIST_HEAD(&ctx->scoped_accesses);
+ list_add(&sa->list, &ctx->scoped_accesses);
+
+ ctx->disable_count--;
+ return sa;
+}
+EXPORT_SYMBOL(kcsan_begin_scoped_access);
+
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+
+ if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
+ return;
+
+ ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
+
+ list_del(&sa->list);
+ if (list_empty(&ctx->scoped_accesses))
+ /*
+ * Ensure we do not enter kcsan_check_scoped_accesses()
+ * slow-path if unnecessary, and avoids requiring list_empty()
+ * in the fast-path (to avoid a READ_ONCE() and potential
+ * uaccess warning).
+ */
+ ctx->scoped_accesses.prev = NULL;
+
+ ctx->disable_count--;
+
+ __kcsan_check_access(sa->ptr, sa->size, sa->type);
+}
+EXPORT_SYMBOL(kcsan_end_scoped_access);
+
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
check_access(ptr, size, type);
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index ae0a383238ea..ddc18f1224a4 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -205,6 +205,20 @@ skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
static const char *get_access_type(int type)
{
+ if (type & KCSAN_ACCESS_ASSERT) {
+ if (type & KCSAN_ACCESS_SCOPED) {
+ if (type & KCSAN_ACCESS_WRITE)
+ return "assert no accesses (scoped)";
+ else
+ return "assert no writes (scoped)";
+ } else {
+ if (type & KCSAN_ACCESS_WRITE)
+ return "assert no accesses";
+ else
+ return "assert no writes";
+ }
+ }
+
switch (type) {
case 0:
return "read";
@@ -214,17 +228,14 @@ static const char *get_access_type(int type)
return "write";
case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "write (marked)";
-
- /*
- * ASSERT variants:
- */
- case KCSAN_ACCESS_ASSERT:
- case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC:
- return "assert no writes";
- case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE:
- case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
- return "assert no accesses";
-
+ case KCSAN_ACCESS_SCOPED:
+ return "read (scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
+ return "read (marked, scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
+ return "write (scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "write (marked, scoped)";
default:
BUG();
}