Diffstat (limited to 'kernel/kcsan')
-rw-r--r--  kernel/kcsan/atomic.h     |  23
-rw-r--r--  kernel/kcsan/core.c       | 128
-rw-r--r--  kernel/kcsan/debugfs.c    |   2
-rw-r--r--  kernel/kcsan/kcsan.h      |  39
-rw-r--r--  kernel/kcsan/kcsan_test.c |  32
-rw-r--r--  kernel/kcsan/permissive.h |  94
-rw-r--r--  kernel/kcsan/report.c     | 169
7 files changed, 302 insertions, 185 deletions
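
The centerpiece of this series is the 1-bit value-change filter added in kernel/kcsan/permissive.h below. As a reading aid, the heuristic can be modeled as a standalone userspace sketch (illustrative only, not kernel code: hweight64() is approximated with the GCC/Clang builtin __builtin_popcountll(), and type == 0 stands for a plain, unmarked access):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of kcsan_ignore_data_race() from the patch below. */
static bool ignore_data_race(size_t size, int type, uint64_t old,
			     uint64_t new, uint64_t diff)
{
	/* Only plain accesses of at most word size are eligible. */
	if (type || size > sizeof(long))
		return false;

	/* Tolerate changes of no more than 1 bit ... */
	if (__builtin_popcountll(diff) == 1) {
		/* ... except boolean-looking flips of bit 0, which are reported. */
		if (!((!old || !new) && diff == 1))
			return true;
	}
	return false;
}

int main(void)
{
	/* A 1-bit flag set in a wider word: ignored (prints 1). */
	printf("%d\n", ignore_data_race(8, 0, 0x10, 0x30, 0x10 ^ 0x30));
	/* A boolean-like 0 -> 1 transition: still reported (prints 0). */
	printf("%d\n", ignore_data_race(8, 0, 0, 1, 0 ^ 1));
	/* A multi-bit change: reported (prints 0). */
	printf("%d\n", ignore_data_race(8, 0, 0x0f, 0xf0, 0x0f ^ 0xf0));
	return 0;
}
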
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
deleted file mode 100644
index 530ae1bda8e7..000000000000
--- a/kernel/kcsan/atomic.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rules for implicitly atomic memory accesses.
- *
- * Copyright (C) 2019, Google LLC.
- */
-
-#ifndef _KERNEL_KCSAN_ATOMIC_H
-#define _KERNEL_KCSAN_ATOMIC_H
-
-#include <linux/types.h>
-
-/*
- * Special rules for certain memory where concurrent conflicting accesses are
- * common, however, the current convention is to not mark them; returns true if
- * access to @ptr should be considered atomic. Called from slow-path.
- */
-static bool kcsan_is_atomic_special(const volatile void *ptr)
-{
-	return false;
-}
-
-#endif /* _KERNEL_KCSAN_ATOMIC_H */
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 45c821d4e8bd..76e67d1e02d4 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -20,9 +20,9 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
-#include "atomic.h"
 #include "encoding.h"
 #include "kcsan.h"
+#include "permissive.h"
 
 static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
 unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
@@ -301,9 +301,9 @@ static inline void reset_kcsan_skip(void)
 	this_cpu_write(kcsan_skip, skip_count);
 }
 
-static __always_inline bool kcsan_is_enabled(void)
+static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
 {
-	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
+	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
 }
 
 /* Introduce delay depending on context and configuration. */
@@ -353,10 +353,18 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 					    atomic_long_t *watchpoint,
 					    long encoded_watchpoint)
 {
+	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
+	struct kcsan_ctx *ctx = get_ctx();
 	unsigned long flags;
 	bool consumed;
 
-	if (!kcsan_is_enabled())
+	/*
+	 * We know a watchpoint exists. Let's try to keep the race-window
+	 * between here and finally consuming the watchpoint below as small as
+	 * possible -- avoid unnecessarily complex code until consumed.
+	 */
+
+	if (!kcsan_is_enabled(ctx))
 		return;
 
 	/*
@@ -364,14 +372,22 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 	 * reporting a race where e.g. the writer set up the watchpoint, but the
 	 * reader has access_mask!=0, we have to ignore the found watchpoint.
 	 */
-	if (get_ctx()->access_mask != 0)
+	if (ctx->access_mask)
 		return;
 
 	/*
-	 * Consume the watchpoint as soon as possible, to minimize the chances
-	 * of !consumed. Consuming the watchpoint must always be guarded by
-	 * kcsan_is_enabled() check, as otherwise we might erroneously
-	 * triggering reports when disabled.
+	 * If the other thread does not want to ignore the access, and there was
+	 * a value change as a result of this thread's operation, we will still
+	 * generate a report of unknown origin.
+	 *
+	 * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
+	 */
+	if (!is_assert && kcsan_ignore_address(ptr))
+		return;
+
+	/*
+	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
+	 * avoid erroneously triggering reports if the context is disabled.
 	 */
 	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
@@ -380,9 +396,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
 	if (consumed) {
 		kcsan_save_irqtrace(current);
-		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
-			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
-			     watchpoint - watchpoints);
+		kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
 		kcsan_restore_irqtrace(current);
 	} else {
 		/*
@@ -393,7 +407,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
 	}
 
-	if ((type & KCSAN_ACCESS_ASSERT) != 0)
+	if (is_assert)
 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 	else
 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
@@ -407,15 +421,11 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
 	atomic_long_t *watchpoint;
-	union {
-		u8 _1;
-		u16 _2;
-		u32 _4;
-		u64 _8;
-	} expect_value;
+	u64 old, new, diff;
 	unsigned long access_mask;
 	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
+	struct kcsan_ctx *ctx = get_ctx();
 	unsigned long irq_flags = 0;
 
 	/*
@@ -424,16 +434,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 */
 	reset_kcsan_skip();
 
-	if (!kcsan_is_enabled())
+	if (!kcsan_is_enabled(ctx))
 		goto out;
 
 	/*
-	 * Special atomic rules: unlikely to be true, so we check them here in
-	 * the slow-path, and not in the fast-path in is_atomic(). Call after
-	 * kcsan_is_enabled(), as we may access memory that is not yet
-	 * initialized during early boot.
+	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
+	 * memory that is not yet initialized during early boot.
 	 */
-	if (!is_assert && kcsan_is_atomic_special(ptr))
+	if (!is_assert && kcsan_ignore_address(ptr))
		goto out;
 
 	if (!check_encodable((unsigned long)ptr, size)) {
@@ -468,33 +476,24 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Read the current value, to later check and infer a race if the data
 	 * was modified via a non-instrumented access, e.g. from a device.
 	 */
-	expect_value._8 = 0;
+	old = 0;
 	switch (size) {
 	case 1:
-		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
+		old = READ_ONCE(*(const u8 *)ptr);
 		break;
 	case 2:
-		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
+		old = READ_ONCE(*(const u16 *)ptr);
 		break;
 	case 4:
-		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
+		old = READ_ONCE(*(const u32 *)ptr);
 		break;
 	case 8:
-		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
+		old = READ_ONCE(*(const u64 *)ptr);
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
 	}
 
-	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
-		kcsan_disable_current();
-		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
-		       is_write ? "write" : "read", size, ptr,
-		       watchpoint_slot((unsigned long)ptr),
-		       encode_watchpoint((unsigned long)ptr, size, is_write));
-		kcsan_enable_current();
-	}
-
 	/*
 	 * Delay this thread, to increase probability of observing a racy
 	 * conflicting access.
@@ -505,34 +504,37 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Re-read value, and check if it is as expected; if not, we infer a
 	 * racy access.
 	 */
-	access_mask = get_ctx()->access_mask;
+	access_mask = ctx->access_mask;
+	new = 0;
 	switch (size) {
 	case 1:
-		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
-		if (access_mask)
-			expect_value._1 &= (u8)access_mask;
+		new = READ_ONCE(*(const u8 *)ptr);
 		break;
 	case 2:
-		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
-		if (access_mask)
-			expect_value._2 &= (u16)access_mask;
+		new = READ_ONCE(*(const u16 *)ptr);
 		break;
 	case 4:
-		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
-		if (access_mask)
-			expect_value._4 &= (u32)access_mask;
+		new = READ_ONCE(*(const u32 *)ptr);
 		break;
 	case 8:
-		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
-		if (access_mask)
-			expect_value._8 &= (u64)access_mask;
+		new = READ_ONCE(*(const u64 *)ptr);
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
 	}
 
-	/* Were we able to observe a value-change? */
-	if (expect_value._8 != 0)
+	diff = old ^ new;
+	if (access_mask)
+		diff &= access_mask;
+
+	/*
+	 * Check if we observed a value change.
+	 *
+	 * Also check if the data race should be ignored (the rules depend on
+	 * non-zero diff); if it is to be ignored, the below rules for
+	 * KCSAN_VALUE_CHANGE_MAYBE apply.
+	 */
+	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
 		value_change = KCSAN_VALUE_CHANGE_TRUE;
 
 	/* Check if this access raced with another. */
@@ -566,8 +568,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
-			     watchpoint - watchpoints);
+		kcsan_report_known_origin(ptr, size, type, value_change,
+					  watchpoint - watchpoints,
+					  old, new, access_mask);
 	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
 
@@ -576,9 +579,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
-				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
-				     watchpoint - watchpoints);
+			kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
 	}
 
 	/*
@@ -655,6 +656,15 @@ void __init kcsan_init(void)
 		pr_info("enabled early\n");
 		WRITE_ONCE(kcsan_enabled, true);
 	}
+
+	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
+	    IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
+	    IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
+	    IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
+		pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
+	} else {
+		pr_info("strict mode configured\n");
+	}
 }
 
 /* === Exported interface =================================================== */
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index e65de172ccf7..1d1d1b0e4248 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -64,7 +64,7 @@ static noinline void microbenchmark(unsigned long iters)
 {
 	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
 	const bool was_enabled = READ_ONCE(kcsan_enabled);
-	cycles_t cycles;
+	u64 cycles;
 
 	/* We may have been called from an atomic region; reset context. */
 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 9881099d4179..f36e25c497ed 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -116,30 +116,27 @@ enum kcsan_value_change {
 	KCSAN_VALUE_CHANGE_TRUE,
 };
 
-enum kcsan_report_type {
-	/*
-	 * The thread that set up the watchpoint and briefly stalled was
-	 * signalled that another thread triggered the watchpoint.
-	 */
-	KCSAN_REPORT_RACE_SIGNAL,
-
-	/*
-	 * A thread found and consumed a matching watchpoint.
-	 */
-	KCSAN_REPORT_CONSUMED_WATCHPOINT,
+/*
+ * The calling thread hit and consumed a watchpoint: set the access information
+ * to be consumed by the reporting thread. No report is printed yet.
+ */
+void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
+			   int watchpoint_idx);
 
-	/*
-	 * No other thread was observed to race with the access, but the data
-	 * value before and after the stall differs.
-	 */
-	KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
-};
+/*
+ * The calling thread observed that the watchpoint it set up was hit and
+ * consumed: print the full report based on information set by the racing
+ * thread.
+ */
+void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
+			       enum kcsan_value_change value_change, int watchpoint_idx,
+			       u64 old, u64 new, u64 mask);
 
 /*
- * Print a race report from thread that encountered the race.
+ * No other thread was observed to race with the access, but the data value
+ * before and after the stall differs. Reports a race of "unknown origin".
 */
-extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-			 enum kcsan_value_change value_change,
-			 enum kcsan_report_type type, int watchpoint_idx);
+void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
+				 u64 old, u64 new, u64 mask);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index 8bcffbdef3d3..dc55fd5a36fc 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -414,6 +414,14 @@ static noinline void test_kernel_atomic_builtins(void)
 	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
 }
 
+static noinline void test_kernel_xor_1bit(void)
+{
+	/* Do not report data races between the read-writes. */
+	kcsan_nestable_atomic_begin();
+	test_var ^= 0x10000;
+	kcsan_nestable_atomic_end();
+}
+
 /* ===== Test cases ===== */
 
 /* Simple test with normal data race. */
@@ -952,6 +960,29 @@ static void test_atomic_builtins(struct kunit *test)
 	KUNIT_EXPECT_FALSE(test, match_never);
 }
 
+__no_kcsan
+static void test_1bit_value_change(struct kunit *test)
+{
+	const struct expect_report expect = {
+		.access = {
+			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
+			{ test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
+		},
+	};
+	bool match = false;
+
+	begin_test_checks(test_kernel_read, test_kernel_xor_1bit);
+	do {
+		match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)
+				? report_available()
+				: report_matches(&expect);
+	} while (!end_test_checks(match));
+	if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
+		KUNIT_EXPECT_FALSE(test, match);
+	else
+		KUNIT_EXPECT_TRUE(test, match);
+}
+
 /*
  * Generate thread counts for all test cases. Values generated are in interval
  * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
@@ -1024,6 +1055,7 @@ static struct kunit_case kcsan_test_cases[] = {
 	KCSAN_KUNIT_CASE(test_jiffies_noreport),
 	KCSAN_KUNIT_CASE(test_seqlock_noreport),
 	KCSAN_KUNIT_CASE(test_atomic_builtins),
+	KCSAN_KUNIT_CASE(test_1bit_value_change),
 	{},
 };
 
diff --git a/kernel/kcsan/permissive.h b/kernel/kcsan/permissive.h
new file mode 100644
index 000000000000..2c01fe4a59ee
--- /dev/null
+++ b/kernel/kcsan/permissive.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Special rules for ignoring entire classes of data-racy memory accesses. None
+ * of the rules here imply that such data races are generally safe!
+ *
+ * All rules in this file can be configured via CONFIG_KCSAN_PERMISSIVE. Keep
+ * them separate from core code to make it easier to audit.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _KERNEL_KCSAN_PERMISSIVE_H
+#define _KERNEL_KCSAN_PERMISSIVE_H
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+/*
+ * Access ignore rules based on address.
+ */
+static __always_inline bool kcsan_ignore_address(const volatile void *ptr)
+{
+	if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
+		return false;
+
+	/*
+	 * Data-racy bitops on current->flags are too common, ignore completely
+	 * for now.
+	 */
+	return ptr == &current->flags;
+}
+
+/*
+ * Data race ignore rules based on access type and value change patterns.
+ */
+static bool
+kcsan_ignore_data_race(size_t size, int type, u64 old, u64 new, u64 diff)
+{
+	if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
+		return false;
+
+	/*
+	 * Rules here are only for plain read accesses, so that we still report
+	 * data races between plain read-write accesses.
+	 */
+	if (type || size > sizeof(long))
+		return false;
+
+	/*
+	 * A common pattern is checking/setting just 1 bit in a variable; for
+	 * example:
+	 *
+	 *	if (flags & SOME_FLAG) { ... }
+	 *
+	 * and elsewhere flags is updated concurrently:
+	 *
+	 *	flags |= SOME_OTHER_FLAG; // just 1 bit
+	 *
+	 * While it is still recommended that such accesses be marked
+	 * appropriately, in many cases these types of data races are so common
+	 * that marking them all is often unrealistic and left to maintainer
+	 * preference.
+	 *
+	 * The assumption in all cases is that with all known compiler
+	 * optimizations (including those that tear accesses), because no more
+	 * than 1 bit changed, the plain accesses are safe despite the presence
+	 * of data races.
+	 *
+	 * The rules here will ignore the data races if we observe no more than
+	 * 1 bit changed.
+	 *
+	 * Of course many operations can effectively change just 1 bit, but the
+	 * general assumption that data races involving 1-bit changes can be
+	 * tolerated still applies.
+	 *
+	 * And in case a true bug is missed, the bug likely manifests as a
+	 * reportable data race elsewhere.
+	 */
+	if (hweight64(diff) == 1) {
+		/*
+		 * Exception: Report data races where the values look like
+		 * ordinary booleans (one of them was 0 and the 0th bit was
+		 * changed). More often than not, they come with interesting
+		 * memory ordering requirements, so let's report them.
+		 */
+		if (!((!old || !new) && diff == 1))
+			return true;
+	}
+
+	return false;
+}
+
+#endif /* _KERNEL_KCSAN_PERMISSIVE_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 56016e8e7461..21137929d428 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -325,13 +325,10 @@ static void print_verbose_info(struct task_struct *task)
 	print_irqtrace_events(task);
 }
 
-/*
- * Returns true if a report was generated, false otherwise.
- */
-static bool print_report(enum kcsan_value_change value_change,
-			 enum kcsan_report_type type,
+static void print_report(enum kcsan_value_change value_change,
 			 const struct access_info *ai,
-			 const struct other_info *other_info)
+			 const struct other_info *other_info,
+			 u64 old, u64 new, u64 mask)
 {
 	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
 	int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
@@ -344,25 +341,24 @@ static bool print_report(enum kcsan_value_change value_change,
 	 * Must check report filter rules before starting to print.
 	 */
 	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
-		return false;
+		return;
 
-	if (type == KCSAN_REPORT_RACE_SIGNAL) {
+	if (other_info) {
 		other_skipnr = get_stack_skipnr(other_info->stack_entries,
 						other_info->num_stack_entries);
 		other_frame = other_info->stack_entries[other_skipnr];
 
 		/* @value_change is only known for the other thread */
 		if (skip_report(value_change, other_frame))
-			return false;
+			return;
 	}
 
 	if (rate_limit_report(this_frame, other_frame))
-		return false;
+		return;
 
 	/* Print report header. */
 	pr_err("==================================================================\n");
-	switch (type) {
-	case KCSAN_REPORT_RACE_SIGNAL: {
+
+	if (other_info) {
 		int cmp;
 
 		/*
@@ -374,22 +370,15 @@ static bool print_report(enum kcsan_value_change value_change,
 			get_bug_type(ai->access_type | other_info->ai.access_type),
 			(void *)(cmp < 0 ? other_frame : this_frame),
 			(void *)(cmp < 0 ? this_frame : other_frame));
-	} break;
-
-	case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+	} else {
 		pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
 		       (void *)this_frame);
-		break;
-
-	default:
-		BUG();
 	}
 
 	pr_err("\n");
 
 	/* Print information about the racing accesses. */
-	switch (type) {
-	case KCSAN_REPORT_RACE_SIGNAL:
+	if (other_info) {
 		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
 		       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
 		       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
@@ -407,16 +396,10 @@ static bool print_report(enum kcsan_value_change value_change,
 		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
 		       get_access_type(ai->access_type), ai->ptr, ai->size,
 		       get_thread_desc(ai->task_pid), ai->cpu_id);
-		break;
-
-	case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+	} else {
 		pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
 		       get_access_type(ai->access_type), ai->ptr, ai->size,
 		       get_thread_desc(ai->task_pid), ai->cpu_id);
-		break;
-
-	default:
-		BUG();
 	}
 
 	/* Print stack trace of this thread. */
 	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
@@ -425,24 +408,41 @@ static bool print_report(enum kcsan_value_change value_change,
 	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
 		print_verbose_info(current);
 
+	/* Print observed value change. */
+	if (ai->size <= 8) {
+		int hex_len = ai->size * 2;
+		u64 diff = old ^ new;
+
+		if (mask)
+			diff &= mask;
+		if (diff) {
+			pr_err("\n");
+			pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
+			       hex_len, old, hex_len, new);
+			if (mask) {
+				pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
+				       hex_len, diff, hex_len, mask);
+			}
+		}
+	}
+
 	/* Print report footer. */
 	pr_err("\n");
 	pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
 	dump_stack_print_info(KERN_DEFAULT);
 	pr_err("==================================================================\n");
 
-	return true;
+	if (panic_on_warn)
+		panic("panic_on_warn set ...\n");
 }
 
 static void release_report(unsigned long *flags, struct other_info *other_info)
 {
-	if (other_info)
-		/*
-		 * Use size to denote valid/invalid, since KCSAN entirely
-		 * ignores 0-sized accesses.
-		 */
-		other_info->ai.size = 0;
-
+	/*
+	 * Use size to denote valid/invalid, since KCSAN entirely ignores
+	 * 0-sized accesses.
+	 */
+	other_info->ai.size = 0;
 	raw_spin_unlock_irqrestore(&report_lock, *flags);
 }
 
@@ -575,48 +575,42 @@ discard:
 	return false;
 }
 
-/*
- * Depending on the report type either sets @other_info and returns false, or
- * awaits @other_info and returns true. If @other_info is not required for the
- * report type, simply acquires @report_lock and returns true.
- */
-static noinline bool prepare_report(unsigned long *flags,
-				    enum kcsan_report_type type,
-				    const struct access_info *ai,
-				    struct other_info *other_info)
+static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
+					      int access_type)
 {
-	switch (type) {
-	case KCSAN_REPORT_CONSUMED_WATCHPOINT:
-		prepare_report_producer(flags, ai, other_info);
-		return false;
-	case KCSAN_REPORT_RACE_SIGNAL:
-		return prepare_report_consumer(flags, ai, other_info);
-	default:
-		/* @other_info not required; just acquire @report_lock. */
-		raw_spin_lock_irqsave(&report_lock, *flags);
-		return true;
-	}
-}
-
-void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-		  enum kcsan_value_change value_change,
-		  enum kcsan_report_type type, int watchpoint_idx)
-{
-	unsigned long flags = 0;
-	const struct access_info ai = {
+	return (struct access_info) {
 		.ptr		= ptr,
 		.size		= size,
 		.access_type	= access_type,
 		.task_pid	= in_task() ? task_pid_nr(current) : -1,
 		.cpu_id		= raw_smp_processor_id()
 	};
-	struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
-					? NULL : &other_infos[watchpoint_idx];
+}
+
+void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
+			   int watchpoint_idx)
+{
+	const struct access_info ai = prepare_access_info(ptr, size, access_type);
+	unsigned long flags;
 
 	kcsan_disable_current();
-	if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos)))
-		goto out;
+	lockdep_off(); /* See kcsan_report_known_origin(). */
+
+	prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);
+
+	lockdep_on();
+	kcsan_enable_current();
+}
+
+void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
+			       enum kcsan_value_change value_change, int watchpoint_idx,
+			       u64 old, u64 new, u64 mask)
+{
+	const struct access_info ai = prepare_access_info(ptr, size, access_type);
+	struct other_info *other_info = &other_infos[watchpoint_idx];
+	unsigned long flags = 0;
+
+	kcsan_disable_current();
 	/*
 	 * Because we may generate reports when we're in scheduler code, the use
 	 * of printk() could deadlock. Until such time that all printing code
@@ -626,22 +620,35 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 	 */
 	lockdep_off();
 
-	if (prepare_report(&flags, type, &ai, other_info)) {
-		/*
-		 * Never report if value_change is FALSE, only if we it is
-		 * either TRUE or MAYBE. In case of MAYBE, further filtering may
-		 * be done once we know the full stack trace in print_report().
-		 */
-		bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
-				print_report(value_change, type, &ai, other_info);
+	if (!prepare_report_consumer(&flags, &ai, other_info))
+		goto out;
+	/*
+	 * Never report if value_change is FALSE, only when it is
+	 * either TRUE or MAYBE. In case of MAYBE, further filtering may
+	 * be done once we know the full stack trace in print_report().
+	 */
+	if (value_change != KCSAN_VALUE_CHANGE_FALSE)
+		print_report(value_change, &ai, other_info, old, new, mask);
 
-		if (reported && panic_on_warn)
-			panic("panic_on_warn set ...\n");
+	release_report(&flags, other_info);
+out:
+	lockdep_on();
+	kcsan_enable_current();
+}
 
-		release_report(&flags, other_info);
-	}
+void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
+				 u64 old, u64 new, u64 mask)
+{
+	const struct access_info ai = prepare_access_info(ptr, size, access_type);
+	unsigned long flags;
+
+	kcsan_disable_current();
+	lockdep_off(); /* See kcsan_report_known_origin(). */
+
+	raw_spin_lock_irqsave(&report_lock, flags);
+	print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
+	raw_spin_unlock_irqrestore(&report_lock, flags);
 
 	lockdep_on();
-out:
 	kcsan_enable_current();
 }
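
For reference, the "value changed:" footer that print_report() gains in this series can likewise be modeled outside the kernel (a sketch under the same caveats: pr_err() becomes printf(), and hex_len prints two hex digits per byte of the access, as in the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of the value-change lines emitted by print_report(). */
static void print_value_change(size_t size, uint64_t old, uint64_t new,
			       uint64_t mask)
{
	int hex_len = size * 2;	/* two hex digits per byte */
	uint64_t diff = old ^ new;

	if (mask)
		diff &= mask;
	if (!diff)
		return;
	printf("value changed: 0x%0*llx -> 0x%0*llx\n",
	       hex_len, (unsigned long long)old, hex_len, (unsigned long long)new);
	if (mask)
		printf(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
		       hex_len, (unsigned long long)diff, hex_len, (unsigned long long)mask);
}

int main(void)
{
	/* 4-byte access, bit 16 flipped, no access mask set. */
	print_value_change(4, 0x00010000, 0x00000000, 0);
	/* 4-byte access with an access mask covering only the low byte. */
	print_value_change(4, 0x12345678, 0x12345679, 0xff);
	return 0;
}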