Diffstat (limited to 'lib')
-rw-r--r-- | lib/generic-radix-tree.c | 32
-rw-r--r-- | lib/string.c             | 21
-rw-r--r-- | lib/test_meminit.c       | 27
-rw-r--r-- | lib/vdso/Kconfig         |  9
4 files changed, 53 insertions, 36 deletions
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index ae25e2fa2187..f25eb111c051 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/generic-radix-tree.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 
 #define GENRADIX_ARY		(PAGE_SIZE / sizeof(struct genradix_node *))
 #define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
 }
 EXPORT_SYMBOL(__genradix_ptr);
 
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+	struct genradix_node *node;
+
+	node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+	/*
+	 * We're using pages (not slab allocations) directly for kernel data
+	 * structures, so we need to explicitly inform kmemleak of them in order
+	 * to avoid false positive memory leak reports.
+	 */
+	kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+	return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+	kmemleak_free(node);
+	free_page((unsigned long)node);
+}
+
 /*
  * Returns pointer to the specified byte @offset within @radix, allocating it if
  * necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 			break;
 
 		if (!new_node) {
-			new_node = (void *)
-				__get_free_page(gfp_mask|__GFP_ZERO);
+			new_node = genradix_alloc_node(gfp_mask);
 			if (!new_node)
 				return NULL;
 		}
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 		n = READ_ONCE(*p);
 		if (!n) {
 			if (!new_node) {
-				new_node = (void *)
-					__get_free_page(gfp_mask|__GFP_ZERO);
+				new_node = genradix_alloc_node(gfp_mask);
 				if (!new_node)
 					return NULL;
 			}
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 	}
 
 	if (new_node)
-		free_page((unsigned long) new_node);
+		genradix_free_node(new_node);
 
 	return &n->data[offset];
 }
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
 				genradix_free_recurse(n->children[i], level - 1);
 	}
 
-	free_page((unsigned long) n);
+	genradix_free_node(n);
 }
 
 int __genradix_prealloc(struct __genradix *radix, size_t size,
diff --git a/lib/string.c b/lib/string.c
index cd7a10c19210..08ec58cc673b 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -748,27 +748,6 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- *		      keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
-	memset(s, 0, count);
-	barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
 #ifndef __HAVE_ARCH_MEMSET16
 /**
  * memset16() - Fill a memory area with a uint16_t
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 9729f271d150..9742e5cb853a 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -297,6 +297,32 @@ out:
 	return 1;
 }
 
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+	struct kmem_cache *c;
+	int i, iter, maxiter = 1024;
+	int num, bytes;
+	bool fail = false;
+	void *objects[10];
+
+	c = kmem_cache_create("test_cache", size, size, 0, NULL);
+	for (iter = 0; (iter < maxiter) && !fail; iter++) {
+		num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+					    objects);
+		for (i = 0; i < num; i++) {
+			bytes = count_nonzero_bytes(objects[i], size);
+			if (bytes)
+				fail = true;
+			fill_with_garbage(objects[i], size);
+		}
+
+		if (num)
+			kmem_cache_free_bulk(c, num, objects);
+	}
+	*total_failures += fail;
+	return 1;
+}
+
 /*
  * Test kmem_cache allocation by creating caches of different sizes, with and
  * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
 			num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
 							&failures);
 		}
+		num_tests += do_kmem_cache_size_bulk(size, &failures);
 	}
 	REPORT_FAILURES_IN_FN();
 	*total_failures += failures;
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index cc00364bd2c2..9fe698ff62ec 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO
 	help
 	  This config option enables the compat VDSO layer.
 
-config CROSS_COMPILE_COMPAT_VDSO
-	string "32 bit Toolchain prefix for compat vDSO"
-	default ""
-	depends on GENERIC_COMPAT_VDSO
-	help
-	  Defines the cross-compiler prefix for compiling compat vDSO.
-	  If a 64 bit compiler (i.e. x86_64) can compile the VDSO for
-	  32 bit, it does not need to define this parameter.
-
 endif
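
The generic-radix-tree hunks route every node allocation through genradix_alloc_node()/genradix_free_node() so that kmemleak is told about the raw pages backing the tree. As a hedged illustration of where those node allocations come from, here is a minimal caller sketch against the genradix API in <linux/generic-radix-tree.h>; the struct foo_entry type, the foo_table name, and the index and values are invented for this example.

#include <linux/errno.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical payload type; any fixed-size struct works. */
struct foo_entry {
	u64	id;
	u32	flags;
};

/* Flat, resizable array of foo_entry backed by page-sized nodes. */
static GENRADIX(struct foo_entry) foo_table;

static int foo_table_setup(void)
{
	struct foo_entry *e;

	genradix_init(&foo_table);

	/*
	 * Allocates page-sized intermediate nodes with __get_free_page()
	 * as needed; with this patch those pages are also registered with
	 * kmemleak, so they no longer appear as false-positive leaks.
	 */
	e = genradix_ptr_alloc(&foo_table, 1234, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->id    = 42;
	e->flags = 0;
	return 0;
}

static void foo_table_teardown(void)
{
	/* Frees every node, each teardown going through genradix_free_node(). */
	genradix_free(&foo_table);
}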
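The lib/string.c hunk only removes the out-of-line definition of memzero_explicit(); the interface itself stays available to callers (the replacement definition is presumably added elsewhere in the same series, which this diff does not show). As a reminder of the pattern the deleted kernel-doc describes, here is a minimal sketch of a caller; handle_secret() and its key-handling are invented purely for illustration.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical caller that keeps sensitive material in a local buffer. */
static void handle_secret(const u8 *input, size_t len)
{
	u8 key[32];

	/* Stand-in for deriving key material into a stack buffer. */
	memcpy(key, input, min_t(size_t, len, sizeof(key)));

	/* ... use key only within this scope ... */

	/*
	 * A plain memset() here could be elided as a dead store, since key
	 * goes out of scope immediately afterwards; memzero_explicit()
	 * prevents the compiler from optimising the zeroing away.
	 */
	memzero_explicit(key, sizeof(key));
}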