author	Dmitry Vyukov <dvyukov@google.com>	2018-02-07 00:36:27 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-07 03:32:43 +0100
commit	ee3ce779b58c31acacdfab0ad6c86d428ba2c2e3 (patch)
tree	e27239a0a81672dfea5f70c893fb92f3615ca47e /mm
parent	kasan: detect invalid frees for large objects (diff)
kasan: don't use __builtin_return_address(1)
__builtin_return_address(1) is unreliable without frame pointers.  With
defconfig on the kmalloc_pagealloc_invalid_free test I am getting:

  BUG: KASAN: double-free or invalid-free in (null)

Pass the caller PC from callers explicitly.

Link: http://lkml.kernel.org/r/9b01bc2d237a4df74ff8472a3bf6b7635908de01.1514378558.git.dvyukov@google.com
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
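For readers unfamiliar with the idiom, here is a minimal userspace sketch of
the pattern this patch switches to (hypothetical names, plain GCC/Clang C, not
kernel code): the immediate caller captures its own return address with
__builtin_return_address(0), which is all the kernel's _RET_IP_ macro expands
to, and hands it down explicitly, so the report path never has to unwind an
extra frame with __builtin_return_address(1).

/*
 * Sketch of "pass the caller PC explicitly" (hypothetical userspace analog).
 */
#include <stdio.h>

/* Userspace stand-in for the kernel's _RET_IP_. */
#define RET_IP ((unsigned long)__builtin_return_address(0))

/* Report path: only formats the ip it was handed, never unwinds the stack. */
static void report_invalid_free(void *object, unsigned long ip)
{
	printf("invalid free of %p called from %p\n", object, (void *)ip);
}

/* Free hook: receives the caller PC instead of trying to recover it. */
static void free_hook(void *object, unsigned long ip)
{
	report_invalid_free(object, ip);
}

/*
 * Public entry point: captures its own return address and forwards it.
 * noinline keeps the demo deterministic even under optimization.
 */
__attribute__((noinline)) void my_free(void *object)
{
	free_hook(object, RET_IP);
}

int main(void)
{
	int x;

	my_free(&x);	/* the reported ip points back into main() */
	return 0;
}

The reported address stays correct regardless of whether free_hook() is
inlined, which is exactly why the patch threads the ip argument through
rather than relying on frame-pointer-based unwinding inside KASAN.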
Diffstat (limited to 'mm')
-rw-r--r--	mm/kasan/kasan.c	8
-rw-r--r--	mm/kasan/kasan.h	2
-rw-r--r--	mm/kasan/report.c	4
-rw-r--r--	mm/slab.c	6
-rw-r--r--	mm/slub.c	8
5 files changed, 14 insertions, 14 deletions
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index ecb64fda79e6..32f555ded938 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -501,7 +501,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
-bool kasan_slab_free(struct kmem_cache *cache, void *object)
+bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
s8 shadow_byte;
@@ -511,7 +511,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
- kasan_report_invalid_free(object, __builtin_return_address(1));
+ kasan_report_invalid_free(object, ip);
return true;
}
@@ -601,10 +601,10 @@ void kasan_poison_kfree(void *ptr)
kasan_poison_slab_free(page->slab_cache, ptr);
}
-void kasan_kfree_large(void *ptr)
+void kasan_kfree_large(void *ptr, unsigned long ip)
{
if (ptr != page_address(virt_to_head_page(ptr)))
- kasan_report_invalid_free(ptr, __builtin_return_address(1));
+ kasan_report_invalid_free(ptr, ip);
/* The object will be poisoned by page_alloc. */
}
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index bf353a18c908..c12dcfde2ebd 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -107,7 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
-void kasan_report_invalid_free(void *object, void *ip);
+void kasan_report_invalid_free(void *object, unsigned long ip);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 55916ad21722..75206991ece0 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -326,12 +326,12 @@ static void print_shadow_for_address(const void *addr)
}
}
-void kasan_report_invalid_free(void *object, void *ip)
+void kasan_report_invalid_free(void *object, unsigned long ip)
{
unsigned long flags;
kasan_start_report(&flags);
- pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip);
+ pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
pr_err("\n");
print_address_description(object);
pr_err("\n");
diff --git a/mm/slab.c b/mm/slab.c
index cd86f15071ad..324446621b3e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3478,11 +3478,11 @@ free_done:
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
*/
-static inline void __cache_free(struct kmem_cache *cachep, void *objp,
- unsigned long caller)
+static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
+ unsigned long caller)
{
/* Put the object into the quarantine, don't touch it for now. */
- if (kasan_slab_free(cachep, objp))
+ if (kasan_slab_free(cachep, objp, _RET_IP_))
return;
___cache_free(cachep, objp, caller);
diff --git a/mm/slub.c b/mm/slub.c
index b54f8787c674..e381728a3751 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1356,13 +1356,13 @@ static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
kasan_kmalloc_large(ptr, size, flags);
}
-static inline void kfree_hook(void *x)
+static __always_inline void kfree_hook(void *x)
{
kmemleak_free(x);
- kasan_kfree_large(x);
+ kasan_kfree_large(x, _RET_IP_);
}
-static inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
{
void *freeptr;
@@ -1390,7 +1390,7 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
* kasan_slab_free() may put x into memory quarantine, delaying its
* reuse. In this case the object's freelist pointer is changed.
*/
- kasan_slab_free(s, x);
+ kasan_slab_free(s, x, _RET_IP_);
return freeptr;
}