author		Andrey Konovalov <andreyknvl@google.com>	2021-02-26 02:19:55 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-26 18:41:02 +0100
commit		928501344fc645f80390afc12708c81b3595745d (patch)
tree		98e66961054a59acab4663ee4249eaa33f7c1535
parent		kasan: use error_report_end tracepoint (diff)
kasan, mm: don't save alloc stacks twice
Patch series "kasan: optimizations and fixes for HW_TAGS", v4.
This patchset makes the HW_TAGS mode more efficient, mostly by reworking
poisoning approaches and simplifying/inlining some internal helpers.
With this change, the overhead of HW_TAGS annotations excluding setting
and checking memory tags is ~3%. The performance impact caused by tags
will be unknown until we have hardware that supports MTE.
As a side-effect, this patchset speeds up generic KASAN by ~15%.
This patch (of 13):
Currently KASAN saves allocation stacks in both kasan_slab_alloc() and
kasan_kmalloc() annotations. This patch changes KASAN to save allocation
stacks for slab objects from kmalloc caches in kasan_kmalloc() only, and
stacks for other slab objects in kasan_slab_alloc() only.
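For context, the double save comes from a kmalloc'ed object passing through
both annotations on a single allocation. A simplified sketch of the SLUB
path (condensed from mm/slub.c of this era, not the verbatim code):

  void *__kmalloc(size_t size, gfp_t flags)
  {
  	struct kmem_cache *s = kmalloc_slab(size, flags);
  	/* slab_alloc() ends in kasan_slab_alloc(): stack saved once... */
  	void *ret = slab_alloc(s, flags, _RET_IP_);

  	/* ...and kasan_kmalloc() then saved the same object's stack again. */
  	return kasan_kmalloc(s, ret, size, flags);
  }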
This change requires ____kasan_kmalloc() to know whether the object
belongs to a kmalloc cache. This is implemented by adding a flag field to
the kasan_info structure. That flag is only set for kmalloc caches via a
new kasan_cache_create_kmalloc() annotation.
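Both annotations funnel into ____kasan_kmalloc(), with is_kmalloc false
for kasan_slab_alloc() and true for kasan_kmalloc(). The resulting
behavior, summarized from the set_alloc_info() hunk below:

  /*
   * cache->kasan_info.is_kmalloc | called via          | alloc stack saved?
   * -----------------------------+---------------------+-------------------
   * false                        | kasan_slab_alloc()  | yes
   * false                        | kasan_kmalloc()     | yes
   * true                         | kasan_slab_alloc()  | no (skipped)
   * true                         | kasan_kmalloc()     | yes
   */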
Link: https://lkml.kernel.org/r/cover.1612546384.git.andreyknvl@google.com
Link: https://lkml.kernel.org/r/7c673ebca8d00f40a7ad6f04ab9a2bddeeae2097.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/kasan.h	|  9 +++++++++
-rw-r--r--	mm/kasan/common.c	| 18 ++++++++++++++----
-rw-r--r--	mm/slab_common.c	|  1 +
3 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 7eaf2d9effb4..3fb31e8a353e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
 struct kasan_cache {
 	int alloc_meta_offset;
 	int free_meta_offset;
+	bool is_kmalloc;
 };
 
 #ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
 		__kasan_cache_create(cache, size, flags);
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+	if (kasan_enabled())
+		__kasan_cache_create_kmalloc(cache);
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache);
 static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 {
@@ -278,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index af1768c4fee5..d8d83ca56fe2 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -210,6 +210,11 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 	*size = optimal_size;
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+	cache->kasan_info.is_kmalloc = true;
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache)
 {
 	if (!kasan_stack_collection_enabled())
@@ -394,17 +399,22 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	}
 }
 
-static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+static void set_alloc_info(struct kmem_cache *cache, void *object,
+				gfp_t flags, bool is_kmalloc)
 {
 	struct kasan_alloc_meta *alloc_meta;
 
+	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
+	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
+		return;
+
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (alloc_meta)
 		kasan_set_track(&alloc_meta->alloc_track, flags);
 }
 
 static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags, bool keep_tag)
+				size_t size, gfp_t flags, bool is_kmalloc)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -423,7 +433,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				KASAN_GRANULE_SIZE);
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_GRANULE_SIZE);
-	tag = assign_tag(cache, object, false, keep_tag);
+	tag = assign_tag(cache, object, false, is_kmalloc);
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
 	kasan_unpoison(set_tag(object, tag), size);
@@ -431,7 +441,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 			KASAN_KMALLOC_REDZONE);
 
 	if (kasan_stack_collection_enabled())
-		set_alloc_info(cache, (void *)object, flags);
+		set_alloc_info(cache, (void *)object, flags, is_kmalloc);
 
 	return set_tag(object, tag);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 284954ef1da5..897c3a446b04 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -643,6 +643,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 		panic("Out of memory when creating slab %s\n", name);
 
 	create_boot_cache(s, name, size, flags, useroffset, usersize);
+	kasan_cache_create_kmalloc(s);
 	list_add(&s->list, &slab_caches);
 	s->refcount = 1;
 	return s;