author     Andrey Konovalov <andreyknvl@google.com>        2018-12-28 09:30:46 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 21:11:44 +0100
commit     5b7c4148222d7acaa1612e5eec84fc66c88d54f3 (patch)
tree       6afb900ecc25c32780ee8c33fe15838a5d8870ce /mm/slab.c
parent     kasan: add bug reporting routines for tag-based mode (diff)
download   linux-5b7c4148222d7acaa1612e5eec84fc66c88d54f3.tar.xz
           linux-5b7c4148222d7acaa1612e5eec84fc66c88d54f3.zip
mm: move obj_to_index to include/linux/slab_def.h
While with SLUB we can actually preassign tags for caches with constructors
and store them in pointers in the freelist, SLAB doesn't allow that, since
the freelist is stored as an array of indexes, so there are no pointers in
which to store the tags.  Instead, we compute the tag twice: once when a
slab is created, before calling the constructor, and again each time an
object is allocated with kmalloc.  The tag is computed simply by taking the
lowest byte of the index that corresponds to the object.  However, in
kasan_kmalloc we only have access to the object's pointer, so we need a way
to find out which index this object corresponds to.

This patch moves obj_to_index from slab.c to include/linux/slab_def.h so it
can be reused by KASAN.

Link: http://lkml.kernel.org/r/c02cd9e574cfd93858e43ac94b05e38f891fef64.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
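As a rough sketch of how the moved helper enables this (the wrapper name
slab_object_tag() is hypothetical and not part of this patch), a tag-based
KASAN path could take the lowest byte of the object's slab index like so:

/*
 * Hedged sketch, not the actual KASAN code: derive an object's tag as
 * the lowest byte of its index within the slab page, using the newly
 * exported obj_to_index().
 */
#include <linux/mm.h>
#include <linux/slab_def.h>

static inline u8 slab_object_tag(const struct kmem_cache *cache, void *object)
{
	struct page *page = virt_to_page(object);

	/* Lowest byte of the object's slab index becomes its tag. */
	return (u8)obj_to_index(cache, page, object);
}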
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  13 -------------
1 file changed, 0 insertions(+), 13 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 15e53cef0378..a80beb543678 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -406,19 +406,6 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 	return page->s_mem + cache->size * idx;
 }
 
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
-{
- u32 offset = (obj - page->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
 #define BOOT_CPUCACHE_ENTRIES 1
 /* internal cache of cache description objs */
 static struct kmem_cache kmem_cache_boot = {
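The comment on the removed block refers to the reciprocal-divide trick from
include/linux/reciprocal_div.h.  The following is a hedged, self-contained
userspace sketch of the idea only, not the kernel implementation: it uses
the simplest multiply-high variant, which is exact for the small offsets
exercised below, whereas the kernel's reciprocal_div.h applies a more
careful multiply-and-shift scheme that is exact for all 32-bit dividends.

/*
 * Sketch of reciprocal division: precompute a fixed-point inverse of a
 * constant divisor once per cache, then turn each (offset / size) into
 * a multiply and a shift instead of a hardware divide.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_value(uint32_t d)
{
	/* ceil(2^32 / d), computed in 64 bits to avoid overflow */
	return (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	/* high 32 bits of a * r approximate a / d */
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t size = 96;                  /* hypothetical object size */
	uint32_t r = reciprocal_value(size); /* done once per cache */
	uint32_t offset;

	for (offset = 0; offset < 4096; offset += size)
		printf("offset %4u -> index %u (expect %u)\n",
		       offset, reciprocal_divide(offset, r), offset / size);
	return 0;
}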