author | Max Filippov <jcmvbkbc@gmail.com> | 2019-10-05 08:33:31 +0200
---|---|---
committer | Max Filippov <jcmvbkbc@gmail.com> | 2022-05-02 04:51:22 +0200
commit | 725aea873261e8d986e527838fde2a721f0962d8 (patch) |
tree | 904b4456d5cb34d2b6d57a41845c06c176d91270 /arch/xtensa/lib |
parent | xtensa: enable HAVE_VIRT_CPU_ACCOUNTING_GEN (diff) |
download | linux-725aea873261e8d986e527838fde2a721f0962d8.tar.xz linux-725aea873261e8d986e527838fde2a721f0962d8.zip |
xtensa: enable KCSAN
Prefix arch-specific barrier macros with '__' to make use of instrumented
generic macros.
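
As a rough illustration (not part of the patch below), the renaming follows this pattern; the exact xtensa definitions and the generic wrappers are assumptions based on arch/xtensa/include/asm/barrier.h and include/asm-generic/barrier.h:

    /* arch barrier macros gain a '__' prefix (sketch) */
    #define __mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
    #define __rmb() barrier()
    #define __wmb() __mb()

    /* include/asm-generic/barrier.h can then provide the instrumented
     * versions on top of the arch ones, roughly: */
    #define mb()  do { kcsan_mb();  __mb();  } while (0)
    #define rmb() do { kcsan_rmb(); __rmb(); } while (0)
    #define wmb() do { kcsan_wmb(); __wmb(); } while (0)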
Prefix arch-specific bitops with 'arch_' to make use of instrumented
generic functions.
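
Again as a sketch: with the 'arch_' prefix in place, the generic wrappers in include/asm-generic/bitops/instrumented-atomic.h (assumed here) can instrument each access before deferring to the arch implementation, roughly:

    /* generic instrumented wrapper (sketch); arch_set_bit() is the
     * renamed xtensa implementation */
    static __always_inline void set_bit(long nr, volatile unsigned long *addr)
    {
            instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
            arch_set_bit(nr, addr);
    }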
Provide stubs for 64-bit atomics when building with KCSAN.
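
The stubs are needed because, with KCSAN instrumentation enabled, the compiler can emit out-of-line calls to the 64-bit '__atomic_*_8' builtins, which 32-bit xtensa cannot satisfy natively; since no such call should ever be reached, the stubs simply BUG(). A hypothetical example of the kind of code that lowers to such a libcall:

    #include <linux/types.h>

    static u64 counter;

    u64 read_counter(void)
    {
            /* On a 32-bit target without native 8-byte atomics, GCC
             * lowers this builtin to a call to __atomic_load_8(),
             * which the stubs resolve at link time. */
            return __atomic_load_n(&counter, __ATOMIC_RELAXED);
    }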
Disable KCSAN instrumentation in arch/xtensa/boot.
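
In Makefile terms this is the usual opt-out pattern (sketch; the exact file under arch/xtensa/boot is an assumption, and the per-object form for the stubs themselves appears in the diff below):

    # arch/xtensa/boot/Makefile (sketch)
    KCSAN_SANITIZE := n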
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: Marco Elver <elver@google.com>
Diffstat (limited to 'arch/xtensa/lib')
-rw-r--r-- | arch/xtensa/lib/Makefile | 2 |
-rw-r--r-- | arch/xtensa/lib/kcsan-stubs.c | 54 |
2 files changed, 56 insertions, 0 deletions
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
index 5848c133f7ea..d4e9c397e3fd 100644
--- a/arch/xtensa/lib/Makefile
+++ b/arch/xtensa/lib/Makefile
@@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \
 	divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
 	usercopy.o strncpy_user.o strnlen_user.o
 lib-$(CONFIG_PCI) += pci-auto.o
+lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+KCSAN_SANITIZE_kcsan-stubs.o := n
diff --git a/arch/xtensa/lib/kcsan-stubs.c b/arch/xtensa/lib/kcsan-stubs.c
new file mode 100644
index 000000000000..2b08faa62b86
--- /dev/null
+++ b/arch/xtensa/lib/kcsan-stubs.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+	BUG();
+}
+
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}
+
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+	BUG();
+}