author      Linus Torvalds <torvalds@linux-foundation.org>  2018-02-07 07:15:42 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-02-07 07:15:42 +0100
commit      a2e5790d841658485d642196dbb0927303d6c22f (patch)
tree        b3d28c9bcb7da6880806146fd22a88a7ee7f733e /lib
parent      Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/k... (diff)
parent      Documentation/sysctl/user.txt: fix typo (diff)
download    linux-a2e5790d841658485d642196dbb0927303d6c22f.tar.xz
            linux-a2e5790d841658485d642196dbb0927303d6c22f.zip
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - kasan updates
 - procfs
 - lib/bitmap updates
 - other lib/ updates
 - checkpatch tweaks
 - rapidio
 - ubsan
 - pipe fixes and cleanups
 - lots of other misc bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  Documentation/sysctl/user.txt: fix typo
  MAINTAINERS: update ARM/QUALCOMM SUPPORT patterns
  MAINTAINERS: update various PALM patterns
  MAINTAINERS: update "ARM/OXNAS platform support" patterns
  MAINTAINERS: update Cortina/Gemini patterns
  MAINTAINERS: remove ARM/CLKDEV SUPPORT file pattern
  MAINTAINERS: remove ANDROID ION pattern
  mm: docs: add blank lines to silence sphinx "Unexpected indentation" errors
  mm: docs: fix parameter names mismatch
  mm: docs: fixup punctuation
  pipe: read buffer limits atomically
  pipe: simplify round_pipe_size()
  pipe: reject F_SETPIPE_SZ with size over UINT_MAX
  pipe: fix off-by-one error when checking buffer limits
  pipe: actually allow root to exceed the pipe buffer limits
  pipe, sysctl: remove pipe_proc_fn()
  pipe, sysctl: drop 'min' parameter from pipe-max-size converter
  kasan: rework Kconfig settings
  crash_dump: is_kdump_kernel can be boolean
  kernel/mutex: mutex_is_locked can be boolean
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                                              11
-rw-r--r--  lib/Kconfig.kasan                                              11
-rw-r--r--  lib/Makefile                                                    2
-rw-r--r--  lib/bitmap.c                                                  137
-rw-r--r--  lib/cpumask.c                                                   9
-rw-r--r--  lib/find_bit.c                                                 59
-rw-r--r--  lib/find_bit_benchmark.c (renamed from lib/test_find_bit.c)   70
-rw-r--r--  lib/stackdepot.c                                               19
-rw-r--r--  lib/test_bitmap.c                                             295
-rw-r--r--  lib/test_kasan.c                                              107
-rw-r--r--  lib/test_sort.c                                                 6
-rw-r--r--  lib/ubsan.c                                                    64
-rw-r--r--  lib/ubsan.h                                                    17
13 files changed, 435 insertions, 372 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64d7c19d3167..b66c264d4194 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK
config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
- default 0 if KASAN
+ default 3072 if KASAN_EXTRA
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)
@@ -1641,7 +1641,10 @@ config DMA_API_DEBUG
If unsure, say N.
-menu "Runtime Testing"
+menuconfig RUNTIME_TESTING_MENU
+ bool "Runtime Testing"
+
+if RUNTIME_TESTING_MENU
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
@@ -1841,7 +1844,7 @@ config TEST_BPF
If unsure, say N.
-config TEST_FIND_BIT
+config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
default n
help
@@ -1929,7 +1932,7 @@ config TEST_DEBUG_VIRTUAL
If unsure, say N.
-endmenu # runtime tests
+endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index bd38aab05929..3d35d062970d 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -20,6 +20,17 @@ config KASAN
Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
+config KASAN_EXTRA
+ bool "KAsan: extra checks"
+ depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
+ help
+ This enables further checks in the kernel address sanitizer, for now
+ it only includes the address-use-after-scope check that can lead
+ to excessive kernel stack usage, frame size warnings and longer
+ compile time.
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
+
+
choice
prompt "Instrumentation type"
depends on KASAN
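
The new KASAN_EXTRA option only enables the compiler's address-use-after-scope instrumentation (-fsanitize-address-use-after-scope, where the compiler supports it), which is also what drives the larger FRAME_WARN default of 3072 in Kconfig.debug above. A minimal illustration of the bug class this check catches (illustrative code, not taken from this patch):

/* Illustrative only: the kind of defect use-after-scope checking reports. */
static int use_after_scope_example(void)
{
	int *p;

	{
		int local = 42;

		p = &local;
	}		/* 'local' leaves scope; its stack slot is poisoned */

	return *p;	/* use-after-scope access, reported at runtime */
}
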
diff --git a/lib/Makefile b/lib/Makefile
index 7adb066692b3..a90d4fcd748f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,8 +46,8 @@ obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
+obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
-obj-$(CONFIG_TEST_FIND_BIT) += test_find_bit.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index d8f0c094b18e..9e498c77ed0e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -1106,111 +1106,80 @@ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
EXPORT_SYMBOL(bitmap_allocate_region);
/**
- * bitmap_from_u32array - copy the contents of a u32 array of bits to bitmap
- * @bitmap: array of unsigned longs, the destination bitmap, non NULL
- * @nbits: number of bits in @bitmap
- * @buf: array of u32 (in host byte order), the source bitmap, non NULL
- * @nwords: number of u32 words in @buf
- *
- * copy min(nbits, 32*nwords) bits from @buf to @bitmap, remaining
- * bits between nword and nbits in @bitmap (if any) are cleared. In
- * last word of @bitmap, the bits beyond nbits (if any) are kept
- * unchanged.
+ * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
+ * @dst: destination buffer
+ * @src: bitmap to copy
+ * @nbits: number of bits in the bitmap
*
- * Return the number of bits effectively copied.
+ * Require nbits % BITS_PER_LONG == 0.
*/
-unsigned int
-bitmap_from_u32array(unsigned long *bitmap, unsigned int nbits,
- const u32 *buf, unsigned int nwords)
+#ifdef __BIG_ENDIAN
+void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
- unsigned int dst_idx, src_idx;
-
- for (src_idx = dst_idx = 0; dst_idx < BITS_TO_LONGS(nbits); ++dst_idx) {
- unsigned long part = 0;
-
- if (src_idx < nwords)
- part = buf[src_idx++];
-
-#if BITS_PER_LONG == 64
- if (src_idx < nwords)
- part |= ((unsigned long) buf[src_idx++]) << 32;
-#endif
-
- if (dst_idx < nbits/BITS_PER_LONG)
- bitmap[dst_idx] = part;
- else {
- unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
+ unsigned int i;
- bitmap[dst_idx] = (bitmap[dst_idx] & ~mask)
- | (part & mask);
- }
+ for (i = 0; i < nbits/BITS_PER_LONG; i++) {
+ if (BITS_PER_LONG == 64)
+ dst[i] = cpu_to_le64(src[i]);
+ else
+ dst[i] = cpu_to_le32(src[i]);
}
-
- return min_t(unsigned int, nbits, 32*nwords);
}
-EXPORT_SYMBOL(bitmap_from_u32array);
+EXPORT_SYMBOL(bitmap_copy_le);
+#endif
+#if BITS_PER_LONG == 64
/**
- * bitmap_to_u32array - copy the contents of bitmap to a u32 array of bits
- * @buf: array of u32 (in host byte order), the dest bitmap, non NULL
- * @nwords: number of u32 words in @buf
- * @bitmap: array of unsigned longs, the source bitmap, non NULL
+ * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
- *
- * copy min(nbits, 32*nwords) bits from @bitmap to @buf. Remaining
- * bits after nbits in @buf (if any) are cleared.
- *
- * Return the number of bits effectively copied.
*/
-unsigned int
-bitmap_to_u32array(u32 *buf, unsigned int nwords,
- const unsigned long *bitmap, unsigned int nbits)
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+ unsigned int nbits)
{
- unsigned int dst_idx = 0, src_idx = 0;
-
- while (dst_idx < nwords) {
- unsigned long part = 0;
-
- if (src_idx < BITS_TO_LONGS(nbits)) {
- part = bitmap[src_idx];
- if (src_idx >= nbits/BITS_PER_LONG)
- part &= BITMAP_LAST_WORD_MASK(nbits);
- src_idx++;
- }
+ unsigned int i, halfwords;
- buf[dst_idx++] = part & 0xffffffffUL;
+ if (!nbits)
+ return;
-#if BITS_PER_LONG == 64
- if (dst_idx < nwords) {
- part >>= 32;
- buf[dst_idx++] = part & 0xffffffffUL;
- }
-#endif
+ halfwords = DIV_ROUND_UP(nbits, 32);
+ for (i = 0; i < halfwords; i++) {
+ bitmap[i/2] = (unsigned long) buf[i];
+ if (++i < halfwords)
+ bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
}
- return min_t(unsigned int, nbits, 32*nwords);
+ /* Clear tail bits in last word beyond nbits. */
+ if (nbits % BITS_PER_LONG)
+ bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
-EXPORT_SYMBOL(bitmap_to_u32array);
+EXPORT_SYMBOL(bitmap_from_arr32);
/**
- * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
- * @dst: destination buffer
- * @src: bitmap to copy
- * @nbits: number of bits in the bitmap
- *
- * Require nbits % BITS_PER_LONG == 0.
+ * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
+ * @buf: array of u32 (in host byte order), the dest bitmap
+ * @bitmap: array of unsigned longs, the source bitmap
+ * @nbits: number of bits in @bitmap
*/
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
- unsigned int i;
+ unsigned int i, halfwords;
- for (i = 0; i < nbits/BITS_PER_LONG; i++) {
- if (BITS_PER_LONG == 64)
- dst[i] = cpu_to_le64(src[i]);
- else
- dst[i] = cpu_to_le32(src[i]);
+ if (!nbits)
+ return;
+
+ halfwords = DIV_ROUND_UP(nbits, 32);
+ for (i = 0; i < halfwords; i++) {
+ buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
+ if (++i < halfwords)
+ buf[i] = (u32) (bitmap[i/2] >> 32);
}
+
+ /* Clear tail bits in last element of array beyond nbits. */
+ if (nbits % BITS_PER_LONG)
+ buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
-EXPORT_SYMBOL(bitmap_copy_le);
+EXPORT_SYMBOL(bitmap_to_arr32);
+
#endif
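
bitmap_from_arr32() and bitmap_to_arr32() replace the bitmap_from_u32array()/bitmap_to_u32array() pair: they take only a bit count, return nothing, and always clear the destination's tail beyond nbits. A minimal usage sketch, with sizes and values that are illustrative only:

#include <linux/bitmap.h>

/* Round-trip a 70-bit bitmap through a u32 array, e.g. for an ABI that
 * exchanges host-order u32 words with user space. */
static void arr32_roundtrip_example(void)
{
	DECLARE_BITMAP(bm, 70);
	u32 words[DIV_ROUND_UP(70, 32)];	/* 3 u32 words for 70 bits */

	bitmap_zero(bm, 70);
	bitmap_set(bm, 0, 40);			/* bits 0..39 */

	bitmap_to_arr32(words, bm, 70);		/* unused high bits of words[2] are cleared */
	bitmap_from_arr32(bm, words, 70);	/* tail of bm beyond bit 69 is cleared */
}
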
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 35fe142ebb5e..beca6244671a 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -33,10 +33,11 @@ EXPORT_SYMBOL(cpumask_next);
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
- while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
- if (cpumask_test_cpu(n, src2p))
- break;
- return n;
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
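
cpumask_next_and() keeps its interface (search starts at n + 1, and -1 is a legal starting argument) but now performs a single find_next_and_bit() walk instead of testing each candidate from src2p individually. A sketch of the usual caller pattern, roughly what for_each_cpu_and() expands to:

#include <linux/cpumask.h>

/* Visit every CPU that is present in both masks. */
static void walk_common_cpus(const struct cpumask *src1p,
			     const struct cpumask *src2p)
{
	int cpu;

	for (cpu = -1;
	     (cpu = cpumask_next_and(cpu, src1p, src2p)) < nr_cpu_ids; )
		pr_info("cpu %d is in both masks\n", cpu);
}
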
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 6ed74f78380c..ee3df93ba69a 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -21,22 +21,29 @@
#include <linux/export.h>
#include <linux/kernel.h>
-#if !defined(find_next_bit) || !defined(find_next_zero_bit)
+#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
+ !defined(find_next_and_bit)
/*
- * This is a common helper function for find_next_bit and
- * find_next_zero_bit. The difference is the "invert" argument, which
- * is XORed with each fetched word before searching it for one bits.
+ * This is a common helper function for find_next_bit, find_next_zero_bit, and
+ * find_next_and_bit. The differences are:
+ * - The "invert" argument, which is XORed with each fetched word before
+ * searching it for one bits.
+ * - The optional "addr2", which is anded with "addr1" if present.
*/
-static unsigned long _find_next_bit(const unsigned long *addr,
- unsigned long nbits, unsigned long start, unsigned long invert)
+static inline unsigned long _find_next_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+ unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (unlikely(start >= nbits))
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
@@ -47,7 +54,10 @@ static unsigned long _find_next_bit(const unsigned long *addr,
if (start >= nbits)
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
}
return min(start + __ffs(tmp), nbits);
@@ -61,7 +71,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
- return _find_next_bit(addr, size, offset, 0UL);
+ return _find_next_bit(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif
@@ -70,11 +80,21 @@ EXPORT_SYMBOL(find_next_bit);
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
- return _find_next_bit(addr, size, offset, ~0UL);
+ return _find_next_bit(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
+#if !defined(find_next_and_bit)
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr1, addr2, size, offset, 0UL);
+}
+EXPORT_SYMBOL(find_next_and_bit);
+#endif
+
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
@@ -146,15 +166,19 @@ static inline unsigned long ext2_swab(const unsigned long y)
}
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
-static unsigned long _find_next_bit_le(const unsigned long *addr,
- unsigned long nbits, unsigned long start, unsigned long invert)
+static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+ unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (unlikely(start >= nbits))
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
/* Handle 1st word. */
tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
@@ -165,7 +189,10 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
if (start >= nbits)
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
}
return min(start + __ffs(ext2_swab(tmp)), nbits);
@@ -176,7 +203,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
- return _find_next_bit_le(addr, size, offset, ~0UL);
+ return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
@@ -185,7 +212,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le);
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
- return _find_next_bit_le(addr, size, offset, 0UL);
+ return _find_next_bit_le(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
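
find_next_and_bit() returns the next bit at or after the given offset that is set in both bitmaps (or the size if there is none), so callers need neither a temporary AND bitmap nor a per-bit re-test loop. A sketch of the equivalence, with map1/map2/nbits/start as illustrative names:

#include <linux/bitops.h>

/* Before: the next bit set in both bitmaps had to be open-coded. */
static unsigned long next_and_bit_open_coded(const unsigned long *map1,
					     const unsigned long *map2,
					     unsigned long nbits,
					     unsigned long start)
{
	unsigned long bit;

	for (bit = find_next_bit(map1, nbits, start);
	     bit < nbits && !test_bit(bit, map2);
	     bit = find_next_bit(map1, nbits, bit + 1))
		;
	return bit;
}

/* After: one call, one pass over the underlying words. */
static unsigned long next_and_bit_helper(const unsigned long *map1,
					 const unsigned long *map2,
					 unsigned long nbits,
					 unsigned long start)
{
	return find_next_and_bit(map1, map2, nbits, start);
}
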
diff --git a/lib/test_find_bit.c b/lib/find_bit_benchmark.c
index f4394a36f9aa..5985a25e6cbc 100644
--- a/lib/test_find_bit.c
+++ b/lib/find_bit_benchmark.c
@@ -35,6 +35,7 @@
#define SPARSE 500
static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
+static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata;
/*
* This is Schlemiel the Painter's algorithm. It should be called after
@@ -43,16 +44,15 @@ static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
static int __init test_find_first_bit(void *bitmap, unsigned long len)
{
unsigned long i, cnt;
- cycles_t cycles;
+ ktime_t time;
- cycles = get_cycles();
+ time = ktime_get();
for (cnt = i = 0; i < len; cnt++) {
i = find_first_bit(bitmap, len);
__clear_bit(i, bitmap);
}
- cycles = get_cycles() - cycles;
- pr_err("find_first_bit:\t\t%llu cycles,\t%ld iterations\n",
- (u64)cycles, cnt);
+ time = ktime_get() - time;
+ pr_err("find_first_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
@@ -60,14 +60,13 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
- cycles_t cycles;
+ ktime_t time;
- cycles = get_cycles();
+ time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
- cycles = get_cycles() - cycles;
- pr_err("find_next_bit:\t\t%llu cycles,\t%ld iterations\n",
- (u64)cycles, cnt);
+ time = ktime_get() - time;
+ pr_err("find_next_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
@@ -75,14 +74,13 @@ static int __init test_find_next_bit(const void *bitmap, unsigned long len)
static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
- cycles_t cycles;
+ ktime_t time;
- cycles = get_cycles();
+ time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
i = find_next_zero_bit(bitmap, len, i) + 1;
- cycles = get_cycles() - cycles;
- pr_err("find_next_zero_bit:\t%llu cycles,\t%ld iterations\n",
- (u64)cycles, cnt);
+ time = ktime_get() - time;
+ pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
@@ -90,9 +88,9 @@ static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
static int __init test_find_last_bit(const void *bitmap, unsigned long len)
{
unsigned long l, cnt = 0;
- cycles_t cycles;
+ ktime_t time;
- cycles = get_cycles();
+ time = ktime_get();
do {
cnt++;
l = find_last_bit(bitmap, len);
@@ -100,9 +98,24 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len)
break;
len = l;
} while (len);
+ time = ktime_get() - time;
+ pr_err("find_last_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_next_and_bit(const void *bitmap,
+ const void *bitmap2, unsigned long len)
+{
+ unsigned long i, cnt;
+ cycles_t cycles;
+
+ cycles = get_cycles();
+ for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+ i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1);
cycles = get_cycles() - cycles;
- pr_err("find_last_bit:\t\t%llu cycles,\t%ld iterations\n",
- (u64)cycles, cnt);
+ pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n",
+ (u64)cycles, cnt);
return 0;
}
@@ -114,31 +127,36 @@ static int __init find_bit_test(void)
pr_err("\nStart testing find_bit() with random-filled bitmap\n");
get_random_bytes(bitmap, sizeof(bitmap));
+ get_random_bytes(bitmap2, sizeof(bitmap2));
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
+ test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
bitmap_zero(bitmap, BITMAP_LEN);
+ bitmap_zero(bitmap2, BITMAP_LEN);
- while (nbits--)
+ while (nbits--) {
__set_bit(prandom_u32() % BITMAP_LEN, bitmap);
+ __set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
+ }
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
+ test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
- return 0;
+ /*
+ * Everything is OK. Return error just to let user run benchmark
+ * again without annoying rmmod.
+ */
+ return -EINVAL;
}
module_init(find_bit_test);
-static void __exit test_find_bit_cleanup(void)
-{
-}
-module_exit(test_find_bit_cleanup);
-
MODULE_LICENSE("GPL");
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index f87d138e9672..e513459a5601 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -163,6 +163,21 @@ static inline u32 hash_stack(unsigned long *entries, unsigned int size)
STACK_HASH_SEED);
}
+/* Use our own, non-instrumented version of memcmp().
+ *
+ * We actually don't care about the order, just the equality.
+ */
+static inline
+int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
+ unsigned int n)
+{
+ for ( ; n-- ; u1++, u2++) {
+ if (*u1 != *u2)
+ return 1;
+ }
+ return 0;
+}
+
/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
unsigned long *entries, int size,
@@ -173,10 +188,8 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
for (found = bucket; found; found = found->next) {
if (found->hash == hash &&
found->size == size &&
- !memcmp(entries, found->entries,
- size * sizeof(unsigned long))) {
+ !stackdepot_memcmp(entries, found->entries, size))
return found;
- }
}
return NULL;
}
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index aa1f2669bdd5..b3f235baa05d 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -23,7 +23,7 @@ __check_eq_uint(const char *srcfile, unsigned int line,
const unsigned int exp_uint, unsigned int x)
{
if (exp_uint != x) {
- pr_warn("[%s:%u] expected %u, got %u\n",
+ pr_err("[%s:%u] expected %u, got %u\n",
srcfile, line, exp_uint, x);
return false;
}
@@ -33,19 +33,13 @@ __check_eq_uint(const char *srcfile, unsigned int line,
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
- const unsigned long *exp_bmap, unsigned int exp_nbits,
- const unsigned long *bmap, unsigned int nbits)
+ const unsigned long *exp_bmap, const unsigned long *bmap,
+ unsigned int nbits)
{
- if (exp_nbits != nbits) {
- pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n",
- srcfile, line, exp_nbits, nbits);
- return false;
- }
-
if (!bitmap_equal(exp_bmap, bmap, nbits)) {
pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n",
srcfile, line,
- exp_nbits, exp_bmap, nbits, bmap);
+ nbits, exp_bmap, nbits, bmap);
return false;
}
return true;
@@ -69,6 +63,10 @@ __check_eq_pbl(const char *srcfile, unsigned int line,
static bool __init
__check_eq_u32_array(const char *srcfile, unsigned int line,
const u32 *exp_arr, unsigned int exp_len,
+ const u32 *arr, unsigned int len) __used;
+static bool __init
+__check_eq_u32_array(const char *srcfile, unsigned int line,
+ const u32 *exp_arr, unsigned int exp_len,
const u32 *arr, unsigned int len)
{
if (exp_len != len) {
@@ -107,7 +105,65 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
-static void __init test_zero_fill_copy(void)
+static void __init test_zero_clear(void)
+{
+ DECLARE_BITMAP(bmap, 1024);
+
+ /* Known way to set all bits */
+ memset(bmap, 0xff, 128);
+
+ expect_eq_pbl("0-22", bmap, 23);
+ expect_eq_pbl("0-1023", bmap, 1024);
+
+ /* single-word bitmaps */
+ bitmap_clear(bmap, 0, 9);
+ expect_eq_pbl("9-1023", bmap, 1024);
+
+ bitmap_zero(bmap, 35);
+ expect_eq_pbl("64-1023", bmap, 1024);
+
+ /* cross boundaries operations */
+ bitmap_clear(bmap, 79, 19);
+ expect_eq_pbl("64-78,98-1023", bmap, 1024);
+
+ bitmap_zero(bmap, 115);
+ expect_eq_pbl("128-1023", bmap, 1024);
+
+ /* Zeroing entire area */
+ bitmap_zero(bmap, 1024);
+ expect_eq_pbl("", bmap, 1024);
+}
+
+static void __init test_fill_set(void)
+{
+ DECLARE_BITMAP(bmap, 1024);
+
+ /* Known way to clear all bits */
+ memset(bmap, 0x00, 128);
+
+ expect_eq_pbl("", bmap, 23);
+ expect_eq_pbl("", bmap, 1024);
+
+ /* single-word bitmaps */
+ bitmap_set(bmap, 0, 9);
+ expect_eq_pbl("0-8", bmap, 1024);
+
+ bitmap_fill(bmap, 35);
+ expect_eq_pbl("0-63", bmap, 1024);
+
+ /* cross boundaries operations */
+ bitmap_set(bmap, 79, 19);
+ expect_eq_pbl("0-63,79-97", bmap, 1024);
+
+ bitmap_fill(bmap, 115);
+ expect_eq_pbl("0-127", bmap, 1024);
+
+ /* Zeroing entire area */
+ bitmap_fill(bmap, 1024);
+ expect_eq_pbl("0-1023", bmap, 1024);
+}
+
+static void __init test_copy(void)
{
DECLARE_BITMAP(bmap1, 1024);
DECLARE_BITMAP(bmap2, 1024);
@@ -116,36 +172,20 @@ static void __init test_zero_fill_copy(void)
bitmap_zero(bmap2, 1024);
/* single-word bitmaps */
- expect_eq_pbl("", bmap1, 23);
-
- bitmap_fill(bmap1, 19);
- expect_eq_pbl("0-18", bmap1, 1024);
-
+ bitmap_set(bmap1, 0, 19);
bitmap_copy(bmap2, bmap1, 23);
expect_eq_pbl("0-18", bmap2, 1024);
- bitmap_fill(bmap2, 23);
- expect_eq_pbl("0-22", bmap2, 1024);
-
+ bitmap_set(bmap2, 0, 23);
bitmap_copy(bmap2, bmap1, 23);
expect_eq_pbl("0-18", bmap2, 1024);
- bitmap_zero(bmap1, 23);
- expect_eq_pbl("", bmap1, 1024);
-
/* multi-word bitmaps */
- bitmap_zero(bmap1, 1024);
- expect_eq_pbl("", bmap1, 1024);
-
- bitmap_fill(bmap1, 109);
- expect_eq_pbl("0-108", bmap1, 1024);
-
+ bitmap_set(bmap1, 0, 109);
bitmap_copy(bmap2, bmap1, 1024);
expect_eq_pbl("0-108", bmap2, 1024);
bitmap_fill(bmap2, 1024);
- expect_eq_pbl("0-1023", bmap2, 1024);
-
bitmap_copy(bmap2, bmap1, 1024);
expect_eq_pbl("0-108", bmap2, 1024);
@@ -160,9 +200,6 @@ static void __init test_zero_fill_copy(void)
bitmap_fill(bmap2, 1024);
bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */
expect_eq_pbl("0-108,128-1023", bmap2, 1024);
-
- bitmap_zero(bmap2, 97); /* ... but 0-padded til word length */
- expect_eq_pbl("128-1023", bmap2, 1024);
}
#define PARSE_TIME 0x1
@@ -255,171 +292,29 @@ static void __init test_bitmap_parselist(void)
}
}
-static void __init test_bitmap_u32_array_conversions(void)
+static void __init test_bitmap_arr32(void)
{
- DECLARE_BITMAP(bmap1, 1024);
- DECLARE_BITMAP(bmap2, 1024);
- u32 exp_arr[32], arr[32];
- unsigned nbits;
-
- for (nbits = 0 ; nbits < 257 ; ++nbits) {
- const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32);
- unsigned int i, rv;
-
- bitmap_zero(bmap1, nbits);
- bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */
-
- memset(arr, 0xff, sizeof(arr));
- rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits);
- expect_eq_uint(nbits, rv);
-
- memset(exp_arr, 0xff, sizeof(exp_arr));
- memset(exp_arr, 0, used_u32s*sizeof(*exp_arr));
- expect_eq_u32_array(exp_arr, 32, arr, 32);
-
- bitmap_fill(bmap2, 1024);
- rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s);
- expect_eq_uint(nbits, rv);
- expect_eq_bitmap(bmap1, 1024, bmap2, 1024);
-
- for (i = 0 ; i < nbits ; ++i) {
- /*
- * test conversion bitmap -> u32[]
- */
-
- bitmap_zero(bmap1, 1024);
- __set_bit(i, bmap1);
- bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */
-
- memset(arr, 0xff, sizeof(arr));
- rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits);
- expect_eq_uint(nbits, rv);
-
- /* 1st used u32 words contain expected bit set, the
- * remaining words are left unchanged (0xff)
- */
- memset(exp_arr, 0xff, sizeof(exp_arr));
- memset(exp_arr, 0, used_u32s*sizeof(*exp_arr));
- exp_arr[i/32] = (1U<<(i%32));
- expect_eq_u32_array(exp_arr, 32, arr, 32);
-
-
- /* same, with longer array to fill
- */
- memset(arr, 0xff, sizeof(arr));
- rv = bitmap_to_u32array(arr, 32, bmap1, nbits);
- expect_eq_uint(nbits, rv);
-
- /* 1st used u32 words contain expected bit set, the
- * remaining words are all 0s
- */
- memset(exp_arr, 0, sizeof(exp_arr));
- exp_arr[i/32] = (1U<<(i%32));
- expect_eq_u32_array(exp_arr, 32, arr, 32);
-
- /*
- * test conversion u32[] -> bitmap
- */
-
- /* the 1st nbits of bmap2 are identical to
- * bmap1, the remaining bits of bmap2 are left
- * unchanged (all 1s)
- */
- bitmap_fill(bmap2, 1024);
- rv = bitmap_from_u32array(bmap2, nbits,
- exp_arr, used_u32s);
- expect_eq_uint(nbits, rv);
-
- expect_eq_bitmap(bmap1, 1024, bmap2, 1024);
-
- /* same, with more bits to fill
- */
- memset(arr, 0xff, sizeof(arr)); /* garbage */
- memset(arr, 0, used_u32s*sizeof(u32));
- arr[i/32] = (1U<<(i%32));
-
- bitmap_fill(bmap2, 1024);
- rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s);
- expect_eq_uint(used_u32s*32, rv);
-
- /* the 1st nbits of bmap2 are identical to
- * bmap1, the remaining bits of bmap2 are cleared
- */
- bitmap_zero(bmap1, 1024);
- __set_bit(i, bmap1);
- expect_eq_bitmap(bmap1, 1024, bmap2, 1024);
-
-
- /*
- * test short conversion bitmap -> u32[] (1
- * word too short)
- */
- if (used_u32s > 1) {
- bitmap_zero(bmap1, 1024);
- __set_bit(i, bmap1);
- bitmap_set(bmap1, nbits,
- 1024 - nbits); /* garbage */
- memset(arr, 0xff, sizeof(arr));
-
- rv = bitmap_to_u32array(arr, used_u32s - 1,
- bmap1, nbits);
- expect_eq_uint((used_u32s - 1)*32, rv);
-
- /* 1st used u32 words contain expected
- * bit set, the remaining words are
- * left unchanged (0xff)
- */
- memset(exp_arr, 0xff, sizeof(exp_arr));
- memset(exp_arr, 0,
- (used_u32s-1)*sizeof(*exp_arr));
- if ((i/32) < (used_u32s - 1))
- exp_arr[i/32] = (1U<<(i%32));
- expect_eq_u32_array(exp_arr, 32, arr, 32);
- }
-
- /*
- * test short conversion u32[] -> bitmap (3
- * bits too short)
- */
- if (nbits > 3) {
- memset(arr, 0xff, sizeof(arr)); /* garbage */
- memset(arr, 0, used_u32s*sizeof(*arr));
- arr[i/32] = (1U<<(i%32));
-
- bitmap_zero(bmap1, 1024);
- rv = bitmap_from_u32array(bmap1, nbits - 3,
- arr, used_u32s);
- expect_eq_uint(nbits - 3, rv);
-
- /* we are expecting the bit < nbits -
- * 3 (none otherwise), and the rest of
- * bmap1 unchanged (0-filled)
- */
- bitmap_zero(bmap2, 1024);
- if (i < nbits - 3)
- __set_bit(i, bmap2);
- expect_eq_bitmap(bmap2, 1024, bmap1, 1024);
-
- /* do the same with bmap1 initially
- * 1-filled
- */
-
- bitmap_fill(bmap1, 1024);
- rv = bitmap_from_u32array(bmap1, nbits - 3,
- arr, used_u32s);
- expect_eq_uint(nbits - 3, rv);
-
- /* we are expecting the bit < nbits -
- * 3 (none otherwise), and the rest of
- * bmap1 unchanged (1-filled)
- */
- bitmap_zero(bmap2, 1024);
- if (i < nbits - 3)
- __set_bit(i, bmap2);
- bitmap_set(bmap2, nbits-3, 1024 - nbits + 3);
- expect_eq_bitmap(bmap2, 1024, bmap1, 1024);
- }
- }
+ unsigned int nbits, next_bit, len = sizeof(exp) * 8;
+ u32 arr[sizeof(exp) / 4];
+ DECLARE_BITMAP(bmap2, len);
+
+ memset(arr, 0xa5, sizeof(arr));
+
+ for (nbits = 0; nbits < len; ++nbits) {
+ bitmap_to_arr32(arr, exp, nbits);
+ bitmap_from_arr32(bmap2, arr, nbits);
+ expect_eq_bitmap(bmap2, exp, nbits);
+
+ next_bit = find_next_bit(bmap2,
+ round_up(nbits, BITS_PER_LONG), nbits);
+ if (next_bit < round_up(nbits, BITS_PER_LONG))
+ pr_err("bitmap_copy_arr32(nbits == %d:"
+ " tail is not safely cleared: %d\n",
+ nbits, next_bit);
+
+ if (nbits < len - 32)
+ expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
+ 0xa5a5a5a5);
}
}
@@ -453,8 +348,10 @@ static void noinline __init test_mem_optimisations(void)
static int __init test_bitmap_init(void)
{
- test_zero_fill_copy();
- test_bitmap_u32_array_conversions();
+ test_zero_clear();
+ test_fill_set();
+ test_copy();
+ test_bitmap_arr32();
test_bitmap_parselist();
test_mem_optimisations();
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ef1a3ac1397e..98854a64b014 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -94,6 +94,37 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
ptr[size] = 0;
kfree(ptr);
}
+
+static noinline void __init kmalloc_pagealloc_uaf(void)
+{
+ char *ptr;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+ pr_info("kmalloc pagealloc allocation: use-after-free\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kfree(ptr);
+ ptr[0] = 0;
+}
+
+static noinline void __init kmalloc_pagealloc_invalid_free(void)
+{
+ char *ptr;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+ pr_info("kmalloc pagealloc allocation: invalid-free\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kfree(ptr + 1);
+}
#endif
static noinline void __init kmalloc_large_oob_right(void)
@@ -388,7 +419,7 @@ static noinline void __init kasan_stack_oob(void)
static noinline void __init ksize_unpoisons_memory(void)
{
char *ptr;
- size_t size = 123, real_size = size;
+ size_t size = 123, real_size;
pr_info("ksize() unpoisons the whole allocated chunk\n");
ptr = kmalloc(size, GFP_KERNEL);
@@ -472,6 +503,74 @@ static noinline void __init use_after_scope_test(void)
p[1023] = 1;
}
+static noinline void __init kasan_alloca_oob_left(void)
+{
+ volatile int i = 10;
+ char alloca_array[i];
+ char *p = alloca_array - 1;
+
+ pr_info("out-of-bounds to left on alloca\n");
+ *(volatile char *)p;
+}
+
+static noinline void __init kasan_alloca_oob_right(void)
+{
+ volatile int i = 10;
+ char alloca_array[i];
+ char *p = alloca_array + i;
+
+ pr_info("out-of-bounds to right on alloca\n");
+ *(volatile char *)p;
+}
+
+static noinline void __init kmem_cache_double_free(void)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ if (!cache) {
+ pr_err("Cache allocation failed\n");
+ return;
+ }
+ pr_info("double-free on heap object\n");
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Allocation failed\n");
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ kmem_cache_free(cache, p);
+ kmem_cache_free(cache, p);
+ kmem_cache_destroy(cache);
+}
+
+static noinline void __init kmem_cache_invalid_free(void)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ if (!cache) {
+ pr_err("Cache allocation failed\n");
+ return;
+ }
+ pr_info("invalid-free of heap object\n");
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Allocation failed\n");
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ kmem_cache_free(cache, p + 1);
+ kmem_cache_destroy(cache);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -485,6 +584,8 @@ static int __init kmalloc_tests_init(void)
kmalloc_node_oob_right();
#ifdef CONFIG_SLUB
kmalloc_pagealloc_oob_right();
+ kmalloc_pagealloc_uaf();
+ kmalloc_pagealloc_invalid_free();
#endif
kmalloc_large_oob_right();
kmalloc_oob_krealloc_more();
@@ -502,9 +603,13 @@ static int __init kmalloc_tests_init(void)
memcg_accounted_kmem_cache();
kasan_stack_oob();
kasan_global_oob();
+ kasan_alloca_oob_left();
+ kasan_alloca_oob_right();
ksize_unpoisons_memory();
copy_user_test();
use_after_scope_test();
+ kmem_cache_double_free();
+ kmem_cache_invalid_free();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_sort.c b/lib/test_sort.c
index d389c1cc2f6c..385c0ed5202f 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -39,5 +39,11 @@ exit:
return err;
}
+static void __exit test_sort_exit(void)
+{
+}
+
module_init(test_sort_init);
+module_exit(test_sort_exit);
+
MODULE_LICENSE("GPL");
diff --git a/lib/ubsan.c b/lib/ubsan.c
index fb0409df1bcf..59fee96c29a0 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -141,11 +141,6 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static bool location_is_valid(struct source_location *loc)
-{
- return loc->file_name != NULL;
-}
-
static DEFINE_SPINLOCK(report_lock);
static void ubsan_prologue(struct source_location *location,
@@ -265,14 +260,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
-static void handle_null_ptr_deref(struct type_mismatch_data *data)
+static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
unsigned long flags;
- if (suppress_report(&data->location))
+ if (suppress_report(data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(data->location, &flags);
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
@@ -281,15 +276,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data)
ubsan_epilogue(&flags);
}
-static void handle_missaligned_access(struct type_mismatch_data *data,
+static void handle_misaligned_access(struct type_mismatch_data_common *data,
unsigned long ptr)
{
unsigned long flags;
- if (suppress_report(&data->location))
+ if (suppress_report(data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(data->location, &flags);
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
@@ -299,15 +294,15 @@ static void handle_missaligned_access(struct type_mismatch_data *data,
ubsan_epilogue(&flags);
}
-static void handle_object_size_mismatch(struct type_mismatch_data *data,
+static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
unsigned long ptr)
{
unsigned long flags;
- if (suppress_report(&data->location))
+ if (suppress_report(data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(data->location, &flags);
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
@@ -315,37 +310,46 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data,
ubsan_epilogue(&flags);
}
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
+static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
unsigned long ptr)
{
if (!ptr)
handle_null_ptr_deref(data);
else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
- handle_missaligned_access(data, ptr);
+ handle_misaligned_access(data, ptr);
else
handle_object_size_mismatch(data, ptr);
}
-EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
-void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
+void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
+ unsigned long ptr)
{
- unsigned long flags;
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, &flags);
+ struct type_mismatch_data_common common_data = {
+ .location = &data->location,
+ .type = data->type,
+ .alignment = data->alignment,
+ .type_check_kind = data->type_check_kind
+ };
+
+ ubsan_type_mismatch_common(&common_data, ptr);
+}
+EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
- pr_err("null pointer returned from function declared to never return null\n");
+void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
+ unsigned long ptr)
+{
- if (location_is_valid(&data->attr_location))
- print_source_location("returns_nonnull attribute specified in",
- &data->attr_location);
+ struct type_mismatch_data_common common_data = {
+ .location = &data->location,
+ .type = data->type,
+ .alignment = 1UL << data->log_alignment,
+ .type_check_kind = data->type_check_kind
+ };
- ubsan_epilogue(&flags);
+ ubsan_type_mismatch_common(&common_data, ptr);
}
-EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
+EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
unsigned long bound)
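
Compilers using the newer UBSan ABI call __ubsan_handle_type_mismatch_v1(), whose data record packs the alignment as a log2 value in a single byte; the handler expands it with 1UL << log_alignment and then shares all reporting code with the old entry point via ubsan_type_mismatch_common(). A small worked example of the packing, with illustrative values:

#include <linux/kernel.h>

/* Illustrative decode of the v1 record's packed alignment. */
static bool example_misaligned(unsigned long ptr)
{
	unsigned char log_alignment = 3;		/* 8-byte-aligned type */
	unsigned long alignment = 1UL << log_alignment;	/* == 8 */

	/* e.g. example_misaligned(0x1003) returns true */
	return alignment && !IS_ALIGNED(ptr, alignment);
}
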
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 88f23557edbe..f4d8d0bd4016 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -37,15 +37,24 @@ struct type_mismatch_data {
unsigned char type_check_kind;
};
-struct nonnull_arg_data {
+struct type_mismatch_data_v1 {
struct source_location location;
- struct source_location attr_location;
- int arg_index;
+ struct type_descriptor *type;
+ unsigned char log_alignment;
+ unsigned char type_check_kind;
+};
+
+struct type_mismatch_data_common {
+ struct source_location *location;
+ struct type_descriptor *type;
+ unsigned long alignment;
+ unsigned char type_check_kind;
};
-struct nonnull_return_data {
+struct nonnull_arg_data {
struct source_location location;
struct source_location attr_location;
+ int arg_index;
};
struct vla_bound_data {