author	Andrey Konovalov <andreyknvl@google.com>	2021-02-24 21:05:50 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-24 22:38:31 +0100
commit	611806b4bf8dd97a4f3d73f5cf3c2c7730c51eb2 (patch)
tree	bc3e29573db25c2b8c21c03f03b5e9544aaa6013 /mm/kasan
parent	kasan: move _RET_IP_ to inline wrappers (diff)
kasan: fix bug detection via ksize for HW_TAGS mode
The currently existing kasan_check_read/write() annotations are intended to be used for kernel modules that have KASAN compiler instrumentation disabled. Thus, they are only relevant for the software KASAN modes that rely on compiler instrumentation.

However, there's another use case for these annotations: ksize() checks that the object passed to it is indeed accessible before unpoisoning the whole object. This is currently done via __kasan_check_read(), which is compiled away for the hardware tag-based mode that doesn't rely on compiler instrumentation. This leads to KASAN failing to detect some memory corruptions.

Provide another annotation called kasan_check_byte() that is available for all KASAN modes. As the implementation, rename and reuse kasan_check_invalid_free(). Use this new annotation in ksize(). To avoid having ksize() as the top frame in the reported stack trace, pass _RET_IP_ to __kasan_check_byte().

Also add a new ksize_uaf() test that checks that a use-after-free is detected via ksize() itself, and via plain accesses that happen later.

Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5
Link: https://lkml.kernel.org/r/f32ad74a60b28d8402482a38476f02bb7600f620.1610733117.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
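For reference, the ksize() call site described above is changed in mm/slab_common.c and is therefore not part of the mm/kasan diffstat below. A rough sketch of how the annotated check is used there, with helper names such as kasan_unpoison_range() assumed rather than taken from this patch:

	size_t ksize(const void *objp)
	{
		size_t size;

		/*
		 * Check that the pointed-to byte is actually accessible before
		 * unpoisoning the whole object. kasan_check_byte() reports the
		 * bad access (e.g. a use-after-free) and returns false if not.
		 */
		if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
			return 0;

		size = __ksize(objp);
		/* Callers may use the whole allocation, so unpoison all of it. */
		kasan_unpoison_range(objp, size);
		return size;
	}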
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/common.c	11
-rw-r--r--	mm/kasan/generic.c	4
-rw-r--r--	mm/kasan/kasan.h	10
-rw-r--r--	mm/kasan/sw_tags.c	6
4 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index eedc3e0fe365..b18189ef3a92 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -345,7 +345,7 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;
- if (kasan_check_invalid_free(tagged_object)) {
+ if (!kasan_byte_accessible(tagged_object)) {
kasan_report_invalid_free(tagged_object, ip);
return true;
}
@@ -490,3 +490,12 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
kasan_report_invalid_free(ptr, ip);
/* The object will be poisoned by kasan_free_pages(). */
}
+
+bool __kasan_check_byte(const void *address, unsigned long ip)
+{
+ if (!kasan_byte_accessible(address)) {
+ kasan_report((unsigned long)address, 1, false, ip);
+ return false;
+ }
+ return true;
+}
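The kasan_check_byte() annotation that callers actually use is a static inline wrapper in include/linux/kasan.h, which is also outside this diffstat; per the commit message it forwards _RET_IP_ so that ksize() does not become the top frame of the report. A minimal sketch, with the exact config guards assumed:

	bool __kasan_check_byte(const void *addr, unsigned long ip);

	static __always_inline bool kasan_check_byte(const void *addr)
	{
		/* Record the caller's return address as the report's top frame. */
		if (kasan_enabled())
			return __kasan_check_byte(addr, _RET_IP_);
		return true;
	}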
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index acab8862dc67..3f17a1218055 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -185,11 +185,11 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
return check_region_inline(addr, size, write, ret_ip);
}
-bool kasan_check_invalid_free(void *addr)
+bool kasan_byte_accessible(const void *addr)
{
s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
- return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
+ return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}
void kasan_cache_shrink(struct kmem_cache *cache)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1298b79f9518..cc14b6e6c14c 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -341,20 +341,20 @@ static inline void kasan_unpoison(const void *address, size_t size)
round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
}
-static inline bool kasan_check_invalid_free(void *addr)
+static inline bool kasan_byte_accessible(const void *addr)
{
u8 ptr_tag = get_tag(addr);
- u8 mem_tag = hw_get_mem_tag(addr);
+ u8 mem_tag = hw_get_mem_tag((void *)addr);
- return (mem_tag == KASAN_TAG_INVALID) ||
- (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
+ return (mem_tag != KASAN_TAG_INVALID) &&
+ (ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag);
}
#else /* CONFIG_KASAN_HW_TAGS */
void kasan_poison(const void *address, size_t size, u8 value);
void kasan_unpoison(const void *address, size_t size);
-bool kasan_check_invalid_free(void *addr);
+bool kasan_byte_accessible(const void *addr);
#endif /* CONFIG_KASAN_HW_TAGS */
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index cc271fceb5d5..94c2d33be333 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -118,13 +118,13 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
return true;
}
-bool kasan_check_invalid_free(void *addr)
+bool kasan_byte_accessible(const void *addr)
{
u8 tag = get_tag(addr);
u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
- return (shadow_byte == KASAN_TAG_INVALID) ||
- (tag != KASAN_TAG_KERNEL && tag != shadow_byte);
+ return (shadow_byte != KASAN_TAG_INVALID) &&
+ (tag == KASAN_TAG_KERNEL || tag == shadow_byte);
}
#define DEFINE_HWASAN_LOAD_STORE(size) \
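The ksize_uaf() test mentioned in the commit message is added to lib/test_kasan.c and is likewise outside this diffstat. A minimal sketch of what it exercises, using the KUNIT_EXPECT_KASAN_FAIL() helper from that file (allocation size and access offsets are assumed):

	static void ksize_uaf(struct kunit *test)
	{
		char *ptr;
		size_t size = 128;

		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		kfree(ptr);

		/* ksize() on a freed object must itself be reported in all KASAN modes. */
		KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
		/* Plain accesses that happen later must still be reported too. */
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size - 1]);
	}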