author		Andrey Konovalov <andreyknvl@google.com>	2018-12-28 09:29:41 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 21:11:43 +0100
commit		12b22386998ccf97497a49c88f9579cf9c0dee55 (patch)
tree		4850b8baabd38b72eadbcff359a500633af00bda /mm/slub.c
parent		kasan, mm: change hooks signatures (diff)
kasan, slub: handle pointer tags in early_kmem_cache_node_alloc
The previous patch updated KASAN hooks signatures and their usage in SLAB
and SLUB code, except for the early_kmem_cache_node_alloc function.  This
patch handles that function separately, as it requires reordering some of
the initialization code to correctly propagate a tagged pointer in case a
tag is assigned by kasan_kmalloc.

Link: http://lkml.kernel.org/r/fc8d0fdcf733a7a52e8d0daaa650f4736a57de8c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index fdd4a86aa882..8561a32910dd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3364,16 +3364,16 @@ static void early_kmem_cache_node_alloc(int node)
 	n = page->freelist;
 	BUG_ON(!n);
-	page->freelist = get_freepointer(kmem_cache_node, n);
-	page->inuse = 1;
-	page->frozen = 0;
-	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
 		      GFP_KERNEL);
+	page->freelist = get_freepointer(kmem_cache_node, n);
+	page->inuse = 1;
+	page->frozen = 0;
+	kmem_cache_node->node[node] = n;
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
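For readers outside the kernel tree, here is a minimal user-space sketch of the pattern the hunk above enforces: a hook such as kasan_kmalloc() may return a different (tagged) pointer, so the caller must reassign its local pointer from the return value before recording it anywhere else. The tag_pointer()/untag_pointer() helpers below are hypothetical stand-ins for the software pointer tagging that KASAN performs; they are not kernel APIs, and the example assumes a 64-bit build where addresses stay below bit 56.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_SHIFT 56
#define ADDR_MASK ((1ULL << TAG_SHIFT) - 1)

/* Hypothetical stand-in for kasan_kmalloc(): returns the same allocation,
 * but with a tag packed into the pointer's top byte. */
static void *tag_pointer(void *ptr, uint8_t tag)
{
	return (void *)(((uintptr_t)ptr & ADDR_MASK) |
			((uintptr_t)tag << TAG_SHIFT));
}

/* Strip the tag before dereferencing (on arm64 with top-byte-ignore the
 * hardware skips the top byte, so the kernel needs no explicit step here). */
static void *untag_pointer(void *ptr)
{
	return (void *)((uintptr_t)ptr & ADDR_MASK);
}

int main(void)
{
	int *n = malloc(sizeof(*n));

	if (!n)
		return 1;

	/* As in the patched early_kmem_cache_node_alloc(): reassign n to the
	 * hook's return value *before* it is recorded anywhere else, so every
	 * later user (freelist, node table, ...) sees the tagged pointer. */
	n = tag_pointer(n, 0x2a);
	printf("tagged pointer: %p\n", (void *)n);

	*(int *)untag_pointer(n) = 1;
	free(untag_pointer(n));
	return 0;
}

Built with a plain C compiler, the sketch prints a pointer whose top byte carries the tag while the underlying allocation is still accessed and freed through the untagged address, which is exactly why the tagged value returned by the hook must be the one that gets propagated.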