author      Dmitry Vyukov <dvyukov@google.com>              2018-02-07 00:36:23 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-02-07 03:32:42 +0100
commit      47adccce3e8a31d315f47183ab1185862b2fc5d4 (patch)
tree        aaf982ef67484b702b42440b459cfd648e58dc4e /mm/kasan
parent      kasan: add functions for unpoisoning stack variables (diff)
kasan: detect invalid frees for large objects
Patch series "kasan: detect invalid frees".

KASAN detects double-frees, but does not detect invalid-frees (when a pointer into the middle of a heap object is passed to free()). We recently had a very unpleasant case in crypto code which freed an inner object inside of a heap allocation. This went unnoticed during the free, but totally corrupted the heap and later led to a bunch of random crashes all over the kernel code. Detect invalid frees.

This patch (of 5):

Detect frees of pointers into the middle of large heap objects.

I dropped const from kasan_kfree_large() because it starts propagating through a bunch of functions in kasan_report.c, slab/slub nearest_obj(), all of their local variables, fixup_red_left(), etc.

Link: http://lkml.kernel.org/r/1b45b4fe1d20fc0de1329aab674c1dd973fee723.1514378558.git.dvyukov@google.com
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
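[A minimal sketch of the bug class the series is aimed at; the struct and function names below are hypothetical, not the actual crypto code:]

#include <linux/slab.h>	/* kmalloc(), kfree() */

struct inner {
	int refcount;
};

struct outer {
	long header;
	struct inner obj;	/* embedded in the same heap allocation */
};

static void buggy_teardown(struct outer *p)
{
	/*
	 * Bug: &p->obj points into the middle of the kmalloc'ed object,
	 * only the containing struct outer may be passed to kfree().
	 * This corrupts allocator state; with this series, KASAN
	 * reports it as an invalid-free instead.
	 */
	kfree(&p->obj);
}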
Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/kasan.c   12
-rw-r--r--  mm/kasan/kasan.h    3
-rw-r--r--  mm/kasan/report.c   3
3 files changed, 7 insertions, 11 deletions
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 8aaee42fcfab..ecb64fda79e6 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -511,8 +511,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
- kasan_report_double_free(cache, object,
- __builtin_return_address(1));
+ kasan_report_invalid_free(object, __builtin_return_address(1));
return true;
}
@@ -602,12 +601,11 @@ void kasan_poison_kfree(void *ptr)
kasan_poison_slab_free(page->slab_cache, ptr);
}
-void kasan_kfree_large(const void *ptr)
+void kasan_kfree_large(void *ptr)
{
- struct page *page = virt_to_page(ptr);
-
- kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
- KASAN_FREE_PAGE);
+ if (ptr != page_address(virt_to_head_page(ptr)))
+ kasan_report_invalid_free(ptr, __builtin_return_address(1));
+ /* The object will be poisoned by page_alloc. */
}
int kasan_module_alloc(void *addr, size_t size)
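[The new check works because kmalloc() allocations too large for a slab cache come straight from the page allocator, so a pointer being freed must equal the start of its (possibly compound) page. A hedged sketch of a free that the check now catches; the caller is illustrative and assumes <linux/slab.h>:]

#include <linux/slab.h>

/*
 * Sketch, not from the patch: a large allocation freed through an
 * offset pointer. Sizes above KMALLOC_MAX_CACHE_SIZE bypass the slab
 * caches, so kfree() ends up in kasan_kfree_large().
 */
static void example_invalid_large_free(void)
{
	char *p = kmalloc(KMALLOC_MAX_CACHE_SIZE + 1, GFP_KERNEL);

	if (!p)
		return;
	/*
	 * p + 16 != page_address(virt_to_head_page(p + 16)), so
	 * kasan_kfree_large() now calls kasan_report_invalid_free().
	 */
	kfree(p + 16);
}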
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 9a768dd71c51..bf353a18c908 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -107,8 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
- void *ip);
+void kasan_report_invalid_free(void *object, void *ip);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index eff12e040498..55916ad21722 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -326,8 +326,7 @@ static void print_shadow_for_address(const void *addr)
}
}
-void kasan_report_double_free(struct kmem_cache *cache, void *object,
- void *ip)
+void kasan_report_invalid_free(void *object, void *ip)
{
unsigned long flags;