author		Andrey Konovalov <andreyknvl@google.com>	2018-12-28 09:30:23 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 21:11:43 +0100
commit		4d176711ea7a8d4873e7157ac6ab242ade3ba351 (patch)
tree		d3612e442e860eae9f7130add02612507d9c0776	/mm/slub.c
parent		kasan, arm64: untag address in _virt_addr_is_linear (diff)
kasan: preassign tags to objects with ctors or SLAB_TYPESAFE_BY_RCU
An object constructor can initialize pointers within the object based on the address of the object. Since the object address might be tagged, we need to assign a tag before calling the constructor. The implemented approach is to assign tags to objects with constructors when a slab is allocated, and to call the constructors once, as usual. The downside is that such objects will always have the same tag when they are reallocated, so we won't catch use-after-frees on them.

Also preassign tags for objects from SLAB_TYPESAFE_BY_RCU caches, since they can be validly accessed after having been freed.

Link: http://lkml.kernel.org/r/f158a8a74a031d66f0a9398a5b0ed453c37ba09a.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
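To see why the tag has to be in place before the constructor runs, consider a ctor that records the object's own address in one of its fields (as self-referential initializers such as list-head setup do). The userspace model below is an illustration only: set_tag(), struct node and node_ctor() are assumptions modeled loosely on the top-byte tags of CONFIG_KASAN_SW_TAGS on arm64, not kernel APIs, and it assumes 64-bit pointers.

/*
 * Illustration only, not kernel code: why the tag must be assigned
 * before the constructor runs.
 */
#include <stdint.h>
#include <stdio.h>

/* Model a software tag stored in the top byte of the address. */
static uintptr_t set_tag(uintptr_t addr, uint8_t tag)
{
	return (addr & ~((uintptr_t)0xff << 56)) | ((uintptr_t)tag << 56);
}

struct node {
	uintptr_t self;		/* the ctor records the object's own address */
};

static void node_ctor(struct node *n, uintptr_t addr_seen_by_ctor)
{
	n->self = addr_seen_by_ctor;
}

int main(void)
{
	struct node storage;
	uintptr_t untagged = (uintptr_t)&storage;
	uintptr_t tagged = set_tag(untagged, 0x2a);

	/* Wrong order: construct first, tag later. The recorded address
	 * keeps the old tag and no longer matches the tagged pointer the
	 * allocator would hand out. */
	node_ctor(&storage, untagged);
	printf("ctor before tagging: match=%d\n", storage.self == tagged);

	/* The patch's order: assign the tag, then run the ctor once. */
	node_ctor(&storage, tagged);
	printf("ctor after tagging:  match=%d\n", storage.self == tagged);
	return 0;
}

The first comparison prints match=0, the second match=1: once a ctor has baked a stale tag into the object, any later pointer-identity check against the tagged address fails, which is what preassigning the tag avoids.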
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e739d46600b9..08740c3f3745 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1451,16 +1451,17 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
#endif
}
-static void setup_object(struct kmem_cache *s, struct page *page,
+static void *setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
- kasan_init_slab_obj(s, object);
+ object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);
kasan_poison_object_data(s, object);
}
+ return object;
}
/*
@@ -1568,16 +1569,16 @@ static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
/* First entry is used as the base of the freelist */
cur = next_freelist_entry(s, page, &pos, start, page_limit,
freelist_count);
+ cur = setup_object(s, page, cur);
page->freelist = cur;
for (idx = 1; idx < page->objects; idx++) {
- setup_object(s, page, cur);
next = next_freelist_entry(s, page, &pos, start, page_limit,
freelist_count);
+ next = setup_object(s, page, next);
set_freepointer(s, cur, next);
cur = next;
}
- setup_object(s, page, cur);
set_freepointer(s, cur, NULL);
return true;
@@ -1599,7 +1600,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
- void *start, *p;
+ void *start, *p, *next;
int idx, order;
bool shuffle;
@@ -1651,13 +1652,16 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!shuffle) {
for_each_object_idx(p, idx, s, start, page->objects) {
- setup_object(s, page, p);
- if (likely(idx < page->objects))
- set_freepointer(s, p, p + s->size);
- else
+ if (likely(idx < page->objects)) {
+ next = p + s->size;
+ next = setup_object(s, page, next);
+ set_freepointer(s, p, next);
+ } else
set_freepointer(s, p, NULL);
}
- page->freelist = fixup_red_left(s, start);
+ start = fixup_red_left(s, start);
+ start = setup_object(s, page, start);
+ page->freelist = start;
}
page->inuse = page->objects;
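The common thread in the three hunks above: setup_object() changed from void to returning the object pointer, because kasan_init_slab_obj() may hand back the same memory under a different tag, and every caller now threads the returned value into the freelist instead of reusing its original argument. Below is a standalone sketch of that linking pattern with assumed names (setup_object_model, free_next); the retagging itself is modeled as an identity function.

/*
 * Standalone sketch, not kernel code: the freelist-linking convention
 * the patch establishes. Callers must store the pointer returned by
 * setup_object(), never the one they passed in.
 */
#include <stdio.h>

#define NOBJ 4

struct obj {
	struct obj *free_next;		/* models the in-object free pointer */
};

static struct obj slab[NOBJ];		/* stand-in for the slab's objects */

static struct obj *setup_object_model(struct obj *o)
{
	return o;	/* the kernel version may return a retagged pointer */
}

int main(void)
{
	struct obj *cur = setup_object_model(&slab[0]);
	struct obj *freelist = cur;	/* first entry is the freelist base */

	for (int idx = 1; idx < NOBJ; idx++) {
		struct obj *next = setup_object_model(&slab[idx]);
		cur->free_next = next;	/* link the returned pointer */
		cur = next;
	}
	cur->free_next = NULL;

	for (struct obj *p = freelist; p; p = p->free_next)
		printf("obj %td\n", p - slab);
	return 0;
}

Surfacing the retag through the return value makes "don't keep using the pre-tag address" a contract visible at every call site, which is why the three callers were rewritten rather than retagging the object in place.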