author     Andrey Konovalov <andreyknvl@google.com>    2020-12-22 21:03:13 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>    2020-12-22 21:55:09 +0100
commit     eeb3160c2419e0f1045537acac7b19cba64112f4 (patch)
tree       db6e10f71b7b5d585c33c6037c33877b4395e387 /mm/kasan/common.c
parent     kasan, mm: check kasan_enabled in annotations (diff)
kasan, mm: rename kasan_poison_kfree
Rename kasan_poison_kfree() to kasan_slab_free_mempool(), as it better
reflects what this annotation does. Also add a comment that explains the
PageSlab() check.

No functional changes.

Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
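As an aside, the PageSlab() comment added below covers kmalloc's fallback to
the page allocator for large sizes. A minimal sketch of a pool that hits that
case (the pool name, element size, and init function are hypothetical
illustrations, not part of this patch):

#include <linux/mempool.h>
#include <linux/sizes.h>

/*
 * Hypothetical example: a kmalloc-backed mempool whose element size is
 * far above the largest kmalloc slab cache, so kmalloc() falls back
 * onto the page allocator. Elements freed back into this pool are
 * therefore !PageSlab(), which is the case the new comment explains.
 */
static mempool_t *big_pool;

static int big_pool_init(void)
{
	/* 4 preallocated elements of 64 KiB each (illustrative numbers) */
	big_pool = mempool_create_kmalloc_pool(4, SZ_64K);
	return big_pool ? 0 : -ENOMEM;
}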
Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--  mm/kasan/common.c  40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ae0130cf9de3..d0f8d7a955cd 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return ____kasan_slab_free(cache, object, ip, true);
}
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+ struct page *page;
+
+ page = virt_to_head_page(ptr);
+
+ /*
+ * Even though this function is only called for kmem_cache_alloc and
+ * kmalloc backed mempool allocations, those allocations can still be
+ * !PageSlab() when the size provided to kmalloc is larger than
+ * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ */
+ if (unlikely(!PageSlab(page))) {
+ if (ptr != page_address(page)) {
+ kasan_report_invalid_free(ptr, ip);
+ return;
+ }
+ poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+ } else {
+ ____kasan_slab_free(page->slab_cache, ptr, ip, false);
+ }
+}
+
static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
flags, true);
}
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
- struct page *page;
-
- page = virt_to_head_page(ptr);
-
- if (unlikely(!PageSlab(page))) {
- if (ptr != page_address(page)) {
- kasan_report_invalid_free(ptr, ip);
- return;
- }
- poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
- } else {
- ____kasan_slab_free(page->slab_cache, ptr, ip, false);
- }
-}
-
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
if (ptr != page_address(virt_to_head_page(ptr)))
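Callers do not invoke __kasan_slab_free_mempool() directly; as with the other
annotations reworked in the parent commit ("kasan, mm: check kasan_enabled in
annotations"), it sits behind an inline wrapper in include/linux/kasan.h. A
sketch of that wrapper under the kasan_enabled() pattern (the exact body is an
assumption, not part of this diff):

static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	/* Assumed shape, following the parent commit's kasan_enabled() check */
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, ip);
}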