author	Andrey Konovalov <andreyknvl@google.com>	2023-12-19 23:28:46 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-29 20:58:36 +0100
commit	9b94fe91099cbf05606151ef05bea9632666f5d5 (patch)
tree	9078af19543cb1ea74ba35bc5726c0f9f06afbfe /mm/kasan
parent	kasan: rename kasan_slab_free_mempool to kasan_mempool_poison_object (diff)
kasan: move kasan_mempool_poison_object
Move kasan_mempool_poison_object after all slab-related KASAN hooks.

This is a preparatory change for the following patches in this series.

No functional changes.

Link: https://lkml.kernel.org/r/23ea215409f43c13cdf9ecc454501a264c107d67.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/common.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
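
For context, __kasan_mempool_poison_object() being moved here is the slow-path body behind the kasan_mempool_poison_object() hook that mempool's free path invokes. Below is a minimal sketch of how such a hook is typically wired up, following the standard KASAN wrapper pattern (static-key check, then the __kasan_* slow path with the caller's return address); it is an illustration, not a verbatim copy of include/linux/kasan.h.

/*
 * Sketch of the caller-facing wrapper, assuming the usual KASAN hook
 * pattern: a no-op unless KASAN is enabled, with _RET_IP_ recorded so
 * reports can point at the mempool caller.
 */
static __always_inline void kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		__kasan_mempool_poison_object(ptr, _RET_IP_);
}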
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e0394d0ee7f1..fc7f711607e1 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -282,29 +282,6 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
____kasan_kfree_large(ptr, ip);
}
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
-{
- struct folio *folio;
-
- folio = virt_to_folio(ptr);
-
- /*
- * Even though this function is only called for kmem_cache_alloc and
- * kmalloc backed mempool allocations, those allocations can still be
- * !PageSlab() when the size provided to kmalloc is larger than
- * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
- */
- if (unlikely(!folio_test_slab(folio))) {
- if (____kasan_kfree_large(ptr, ip))
- return;
- kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
- } else {
- struct slab *slab = folio_slab(folio);
-
- ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
- }
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
void *object, gfp_t flags, bool init)
{
@@ -452,6 +429,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}
+void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+ struct folio *folio;
+
+ folio = virt_to_folio(ptr);
+
+ /*
+ * Even though this function is only called for kmem_cache_alloc and
+ * kmalloc backed mempool allocations, those allocations can still be
+ * !PageSlab() when the size provided to kmalloc is larger than
+ * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ */
+ if (unlikely(!folio_test_slab(folio))) {
+ if (____kasan_kfree_large(ptr, ip))
+ return;
+ kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+ } else {
+ struct slab *slab = folio_slab(folio);
+
+ ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
+ }
+}
+
bool __kasan_check_byte(const void *address, unsigned long ip)
{
if (!kasan_byte_accessible(address)) {
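
The !folio_test_slab() branch in the moved function is reachable in practice: a kmalloc-backed mempool whose element size exceeds the largest kmalloc cache gets page-allocator-backed objects, exactly as the in-code comment describes. A hypothetical sketch follows (the example_* names are made up for illustration; module boilerplate and error paths are trimmed):

/*
 * Hypothetical example of a mempool that exercises the page_alloc
 * fallback: kmalloc sizes above KMALLOC_MAX_CACHE_SIZE are served by
 * the page allocator, so its elements are not slab-backed.
 */
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *example_pool;

static int example_setup(void)
{
	/* Element size just above the largest kmalloc cache. */
	example_pool = mempool_create_kmalloc_pool(4,
						   KMALLOC_MAX_CACHE_SIZE + 1);
	return example_pool ? 0 : -ENOMEM;
}

static void example_use(void)
{
	void *elem = mempool_alloc(example_pool, GFP_KERNEL);

	if (!elem)
		return;
	/*
	 * If the pool has room, mempool_free() returns the element to
	 * the reserve and poisons it; for this pool, that poisoning
	 * takes the !folio_test_slab() path shown in the diff above.
	 */
	mempool_free(elem, example_pool);
}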