author:    Alexander Popov <alex.popov@linux.com>  2020-12-15 04:04:33 +0100
committer: Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 21:13:37 +0100
commit:    a32d654db543843a5ffb248feaec1a909718addd (patch)
tree:      e7d4d79eb4ff3668d1c394ee4f709c76ccfea77d /mm/slab.c
parent:    mm, slab, slub: clear the slab_cache field when freeing page (diff)
mm/slab: perform init_on_free earlier

Currently, with CONFIG_SLAB, init_on_free zeroing happens too late: heap objects reach the heap quarantine without being erased. Let's move the init_on_free clearing before the call to kasan_slab_free(). The quarantine will then store already-erased objects, matching the CONFIG_SLUB=y behavior.

Link: https://lkml.kernel.org/r/20201210183729.1261524-1-alex.popov@linux.com
Signed-off-by: Alexander Popov <alex.popov@linux.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
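For illustration only (not part of the commit): below is a minimal userspace C sketch of the ordering the patch establishes. The quarantine here is a plain array standing in for the KASAN quarantine, and names such as quarantine_put() and cache_free() are hypothetical stand-ins. The point it demonstrates is that the object is erased before a reference to it is parked, so the quarantined copy never retains stale contents.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE 32
#define Q_CAP    16

static void *quarantine[Q_CAP];	/* stand-in for the KASAN quarantine */
static int q_len;

/* Stand-in for kasan_slab_free(): park the object, defer the real free. */
static int quarantine_put(void *obj)
{
	if (q_len < Q_CAP) {
		quarantine[q_len++] = obj;
		return 1;	/* quarantined; caller must not free yet */
	}
	return 0;
}

/* Mirrors the fixed __cache_free() ordering: erase, then quarantine. */
static void cache_free(void *obj, int init_on_free)
{
	if (init_on_free)
		memset(obj, 0, OBJ_SIZE);

	if (quarantine_put(obj))
		return;		/* the parked copy is already erased */

	free(obj);
}

int main(void)
{
	char *obj = malloc(OBJ_SIZE);

	if (!obj)
		return 1;
	memcpy(obj, "secret", 7);	/* pretend this held sensitive data */

	cache_free(obj, 1);

	/* The quarantined object no longer exposes its old contents. */
	printf("first quarantined byte: %d\n", ((char *)quarantine[0])[0]);
	return 0;
}

With the pre-patch ordering (memset in ___cache_free(), after the quarantine hook), the parked object would still hold "secret" while sitting in the quarantine; erasing before the hook, as the diff below does, closes that window.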
Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2e67a513b0c9..176b65e2157d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3417,6 +3417,9 @@ free_done:
 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 					 unsigned long caller)
 {
+	if (unlikely(slab_want_init_on_free(cachep)))
+		memset(objp, 0, cachep->object_size);
+
 	/* Put the object into the quarantine, don't touch it for now. */
 	if (kasan_slab_free(cachep, objp, _RET_IP_))
 		return;
@@ -3435,8 +3438,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
-	if (unlikely(slab_want_init_on_free(cachep)))
-		memset(objp, 0, cachep->object_size);
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 	memcg_slab_free_hook(cachep, &objp, 1);