author    Alexander Potapenko <glider@google.com>  2016-03-25 22:21:59 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-03-26 00:37:42 +0100
commit    7ed2f9e663854db313f177a511145630e398b402 (patch)
tree      f9dfba81a688864a4d78689470f624b0a482f545 /include
parent    kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right() (diff)
mm, kasan: SLAB support
Add KASAN hooks to the SLAB allocator.
This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
 include/linux/kasan.h    | 12 ++++++++++++
 include/linux/slab.h     |  6 ++++++
 include/linux/slab_def.h | 14 ++++++++++++++
 include/linux/slub_def.h | 11 +++++++++++
 4 files changed, 43 insertions(+), 0 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0fdc798e3ff7..839f2007a0f9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -48,6 +48,9 @@ void kasan_unpoison_task_stack(struct task_struct *task);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags);
+
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
@@ -61,6 +64,11 @@ void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+};
+
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
@@ -76,6 +84,10 @@ static inline void kasan_disable_current(void) {}
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      size_t *size,
+				      unsigned long *flags) {}
+
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e4b568738ca3..aa61595a1482 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -92,6 +92,12 @@
 # define SLAB_ACCOUNT		0x00000000UL
 #endif
 
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN		0x08000000UL
+#else
+#define SLAB_KASAN		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e878ba35ae91..9edbbf352340 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -76,8 +76,22 @@ struct kmem_cache {
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page->s_mem) % cache->size;
+	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ac5143f95ee6..665cd0cd18b8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -130,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page_address(page)) % cache->size;
+	void *last_object = page_address(page) +
+		(page->objects - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
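The two nearest_obj() helpers round an arbitrary address x down to the start of the slab object that contains it: subtract the slab base (page->s_mem for SLAB, page_address(page) for SLUB), drop the remainder modulo cache->size, and clamp to the last valid object. A standalone sketch of the same arithmetic with made-up values (plain userspace C, hypothetical numbers, not kernel code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same rounding arithmetic as nearest_obj(), with invented values. */
    int main(void)
    {
            uintptr_t slab_base = 0x1000;   /* page->s_mem / page_address(page) */
            size_t obj_size = 96;           /* cache->size */
            unsigned int num_objs = 4;      /* cache->num / page->objects */

            /* An address 17 bytes into the third object (index 2). */
            uintptr_t x = slab_base + 2 * obj_size + 17;

            uintptr_t object = x - (x - slab_base) % obj_size;
            uintptr_t last_object = slab_base + (num_objs - 1) * obj_size;

            if (object > last_object)       /* clamp, as the kernel helper does */
                    object = last_object;

            assert(object == slab_base + 2 * obj_size);
            printf("x=%#lx -> object=%#lx\n",
                   (unsigned long)x, (unsigned long)object);
            return 0;
    }

The clamp matters for KASAN's reports: an access just past the final object in a slab should be attributed to that last object rather than to an address beyond the slab's objects.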