author	Danilo Krummrich <dakr@kernel.org>	2024-08-13 00:34:34 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-04 06:15:37 +0200
commit	1a83a716ec233990e1fd5b6fbb1200ade63bf450 (patch)
tree	3fbb1818e036cb279d08dd28806676d43918349e /mm/slab_common.c
parent	mm/rmap: use folio->_mapcount for small folios (diff)
mm: krealloc: consider spare memory for __GFP_ZERO
As long as krealloc() is called with __GFP_ZERO consistently, starting with the initial memory allocation, __GFP_ZERO should be fully honored.

However, if krealloc() is called with a decreased size for an existing allocation, it is not ensured that the spare portion of the allocation is zeroed. Thus, if krealloc() is subsequently called with a larger size again, __GFP_ZERO can't be fully honored, since we don't know the previous size, but only the bucket size.

Example:

	buf = kzalloc(64, GFP_KERNEL);
	memset(buf, 0xff, 64);

	buf = krealloc(buf, 48, GFP_KERNEL | __GFP_ZERO);

	/* After this call the last 16 bytes are still 0xff. */
	buf = krealloc(buf, 64, GFP_KERNEL | __GFP_ZERO);

Fix this by explicitly setting spare memory to zero when shrinking an allocation with the __GFP_ZERO flag set or init_on_alloc enabled.

Link: https://lkml.kernel.org/r/20240812223707.32049-1-dakr@kernel.org
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
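To make the scenario above concrete, the following is a minimal, hypothetical out-of-tree test module sketch. It is not part of this patch; names such as krealloc_zero_demo_init are invented for illustration, and error handling is kept deliberately simple. It replays the sequence from the commit message and reports any stale bytes in the regrown tail.

	/* Hypothetical demo module, for illustration only; not part of this patch. */
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int __init krealloc_zero_demo_init(void)
	{
		u8 *buf, *tmp;
		size_t i;

		/* Start with a fully zeroed 64-byte allocation, then dirty it. */
		buf = kzalloc(64, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		memset(buf, 0xff, 64);

		/* Shrink to 48 bytes; without the fix the spare 16 bytes keep 0xff. */
		tmp = krealloc(buf, 48, GFP_KERNEL | __GFP_ZERO);
		if (!tmp) {
			kfree(buf);
			return -ENOMEM;
		}
		buf = tmp;

		/*
		 * Grow back to 64 bytes; __GFP_ZERO can only be honored here if
		 * the spare area was zeroed when the allocation was shrunk.
		 */
		tmp = krealloc(buf, 64, GFP_KERNEL | __GFP_ZERO);
		if (!tmp) {
			kfree(buf);
			return -ENOMEM;
		}
		buf = tmp;

		for (i = 48; i < 64; i++)
			if (buf[i] != 0)
				pr_info("krealloc demo: byte %zu is 0x%02x, expected 0\n",
					i, buf[i]);

		kfree(buf);
		return 0;
	}
	module_init(krealloc_zero_demo_init);

	static void __exit krealloc_zero_demo_exit(void) { }
	module_exit(krealloc_zero_demo_exit);

	MODULE_LICENSE("GPL");

With the fix applied, the loop should print nothing; on an unpatched kernel with neither init_on_alloc nor other hardening covering the spare bytes, the last 16 bytes can still read back as 0xff.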
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	7
1 files changed, 7 insertions, 0 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 40b582a014b8..cff602cedf8e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1273,6 +1273,13 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 	/* If the object still fits, repoison it precisely. */
 	if (ks >= new_size) {
+		/* Zero out spare memory. */
+		if (want_init_on_alloc(flags)) {
+			kasan_disable_current();
+			memset((void *)p + new_size, 0, ks - new_size);
+			kasan_enable_current();
+		}
+
 		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
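For context, want_init_on_alloc() is the existing helper that reports whether newly allocated memory must be zero-initialized. A rough paraphrase of its logic (the exact definition lives in include/linux/mm.h and may differ between kernel versions) is:

	/* Rough paraphrase, not the verbatim kernel definition. */
	static inline bool want_init_on_alloc(gfp_t flags)
	{
		/* True if init_on_alloc hardening is enabled system-wide... */
		if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, &init_on_alloc))
			return true;
		/* ...or if the caller explicitly asked for zeroed memory. */
		return flags & __GFP_ZERO;
	}

This is why the single check in the hunk above covers both the __GFP_ZERO case and the init_on_alloc case mentioned in the commit message.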