author     Peter Collingbourne <pcc@google.com>        2022-05-10 03:20:53 +0200
committer  Andrew Morton <akpm@linux-foundation.org>   2022-05-13 16:20:07 +0200
commit     d949a8155d139aa890795b802004a196b7f00598 (patch)
tree       f4f44a8b94c40c132e6832035a8cd3a43adcc5e1 /mm/slob.c
parent     printk: stop including cache.h from printk.h (diff)
mm: make minimum slab alignment a runtime property
When CONFIG_KASAN_HW_TAGS is enabled we currently increase the minimum
slab alignment to 16. This happens even if MTE is not supported in
hardware or disabled via kasan=off, which creates an unnecessary memory
overhead in those cases. Eliminate this overhead by making the minimum
slab alignment a runtime property and only aligning to 16 if KASAN is
enabled at runtime.
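The runtime hook this relies on, arch_slab_minalign(), is not part of the mm/slob.c diff shown below; it is introduced elsewhere in the series. As a rough sketch only (the 16-byte constant, the arm64 placement, and the exact shape of the override are assumptions here, not the patch contents; kasan_hw_tags_enabled() is the existing helper from the kernel's KASAN interface), the scheme looks like an arch-provided runtime value with a generic compile-time fallback:

/* Sketch, not the actual patch: hypothetical arch override (e.g. arm64)
 * that only demands 16-byte slab alignment when hardware tag-based KASAN
 * is actually enabled at runtime. */
static inline unsigned int arch_slab_minalign(void)
{
	return kasan_hw_tags_enabled() ? 16 : __alignof__(unsigned long long);
}
#define arch_slab_minalign arch_slab_minalign

/* Generic fallback in the common slab header: behaviour is unchanged for
 * architectures that do not override the hook. */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

The slab allocators then take max(ARCH_KMALLOC_MINALIGN, arch_slab_minalign()) at runtime instead of a compile-time minimum, which is what the mm/slob.c hunks below do.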
On a DragonBoard 845c (non-MTE hardware) with a kernel built with
CONFIG_KASAN_HW_TAGS, waiting for quiescence after a full Android boot, I
see the following Slab measurements in /proc/meminfo (median of 3
reboots):
Before: 169020 kB
After: 167304 kB
[akpm@linux-foundation.org: make slab alignment type `unsigned int' to avoid casting]
Link: https://linux-review.googlesource.com/id/I752e725179b43b144153f4b6f584ceb646473ead
Link: https://lkml.kernel.org/r/20220427195820.1716975-2-pcc@google.com
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/slob.c')
-rw-r--r--   mm/slob.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 40ea6e2d4ccd..f47811f09aca 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -478,9 +478,11 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	unsigned int minalign;
 	void *ret;
 
+	minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+			 arch_slab_minalign());
 	gfp &= gfp_allowed_mask;
 
 	might_alloc(gfp);
@@ -493,7 +495,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 	 * kmalloc()'d objects.
 	 */
 	if (is_power_of_2(size))
-		align = max(minalign, (int) size);
+		align = max_t(unsigned int, minalign, size);
 
 	if (!size)
 		return ZERO_SIZE_PTR;
@@ -555,8 +557,11 @@ void kfree(const void *block)
 
 	sp = virt_to_folio(block);
 	if (folio_test_slab(sp)) {
-		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int align = max_t(unsigned int,
+					   ARCH_KMALLOC_MINALIGN,
+					   arch_slab_minalign());
 		unsigned int *m = (unsigned int *)(block - align);
+
 		slob_free(m, *m + align);
 	} else {
 		unsigned int order = folio_order(sp);
@@ -573,7 +578,7 @@ EXPORT_SYMBOL(kfree);
 size_t __ksize(const void *block)
 {
 	struct folio *folio;
-	int align;
+	unsigned int align;
 	unsigned int *m;
 
 	BUG_ON(!block);
@@ -584,7 +589,8 @@ size_t __ksize(const void *block)
 	if (unlikely(!folio_test_slab(folio)))
 		return folio_size(folio);
 
-	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+		      arch_slab_minalign());
 	m = (unsigned int *)(block - align);
 	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
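The reason all three functions touched above must agree on the alignment is that SLOB stores the allocation size in an unsigned int header placed minalign bytes before the pointer it returns, so kfree() and __ksize() recover the header with the same "block - align" arithmetic that __do_kmalloc_node() used to place it. The following is a minimal userspace toy (toy_kmalloc/toy_ksize and the fixed MINALIGN are illustrative stand-ins, not kernel code) showing that layout:

#include <assert.h>
#include <stdlib.h>

#define MINALIGN 16u	/* stand-in for max(ARCH_KMALLOC_MINALIGN, arch_slab_minalign()) */

/* Place a size header at the start of the block and hand out a pointer
 * offset by MINALIGN, mirroring __do_kmalloc_node(). */
static void *toy_kmalloc(unsigned int size)
{
	size_t total = (MINALIGN + size + MINALIGN - 1) & ~(size_t)(MINALIGN - 1);
	unsigned char *raw = aligned_alloc(MINALIGN, total);

	*(unsigned int *)raw = size;	/* size header at the block start */
	return raw + MINALIGN;		/* caller sees the aligned payload */
}

/* Recover the size the way __ksize()/kfree() do: step back by the
 * minimum alignment and read the header. */
static unsigned int toy_ksize(const void *obj)
{
	return *(const unsigned int *)((const unsigned char *)obj - MINALIGN);
}

int main(void)
{
	void *p = toy_kmalloc(40);

	assert(toy_ksize(p) == 40);
	free((unsigned char *)p - MINALIGN);
	return 0;
}

If the alignment were a different value at allocation and free time, the header lookup would miss, which is why the patch computes the runtime value identically in all three places.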