Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/common.c      62
-rw-r--r--  mm/kasan/kasan_test.c  46
2 files changed, 83 insertions, 25 deletions
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 85e7c6b4575c..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -208,15 +208,12 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 	return (void *)object;
 }
 
-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-				      unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+				  unsigned long ip)
 {
-	void *tagged_object;
-
-	if (!kasan_arch_is_ready())
-		return false;
+	void *tagged_object = object;
 
-	tagged_object = object;
 	object = kasan_reset_tag(object);
 
 	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
@@ -224,37 +221,47 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 		return true;
 	}
 
-	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return false;
-
 	if (!kasan_byte_accessible(tagged_object)) {
 		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
 		return true;
 	}
 
+	return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+				      bool init, bool still_accessible)
+{
+	void *tagged_object = object;
+
+	object = kasan_reset_tag(object);
+
+	/* RCU slabs could be legally used after free within the RCU period. */
+	if (unlikely(still_accessible))
+		return;
+
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
 			KASAN_SLAB_FREE, init);
 
 	if (kasan_stack_collection_enabled())
 		kasan_save_free_info(cache, tagged_object);
+}
 
-	return false;
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+				unsigned long ip)
+{
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
+		return false;
+	return check_slab_allocation(cache, object, ip);
 }
 
-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-				unsigned long ip, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+		       bool still_accessible)
 {
-	if (is_kfence_address(object))
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
 		return false;
 
-	/*
-	 * If the object is buggy, do not let slab put the object onto the
-	 * freelist. The object will thus never be allocated again and its
-	 * metadata will never get released.
-	 */
-	if (poison_slab_object(cache, object, ip, init))
-		return true;
+	poison_slab_object(cache, object, init, still_accessible);
 
 	/*
 	 * If the object is put into quarantine, do not let slab put the object
@@ -504,11 +511,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 		return true;
 	}
 
-	if (is_kfence_address(ptr))
-		return false;
+	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+		return true;
 
 	slab = folio_slab(folio);
-	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+	if (check_slab_allocation(slab->slab_cache, ptr, ip))
+		return false;
+
+	poison_slab_object(slab->slab_cache, ptr, false, false);
+	return true;
 }
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
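
The net effect in mm/kasan/common.c is a two-phase free protocol: the invalid-free check runs while the object is still intact, and the poisoning runs once the allocator is done with the object. A minimal sketch of a caller, assuming a hypothetical helper name (the real call sites are in mm/slub.c and are not part of this diff):

/*
 * Illustrative sketch only: slab_free_checked() is a made-up caller of
 * the split hooks defined above; the real free paths live in mm/slub.c.
 */
static bool slab_free_checked(struct kmem_cache *s, void *object, bool init)
{
	/* Phase 1: validate the free while the object is still intact. */
	if (__kasan_slab_pre_free(s, object, _RET_IP_))
		return false;	/* buggy free: keep the object off the freelist */

	/*
	 * Phase 2: poison the object, unless the cache is
	 * SLAB_TYPESAFE_BY_RCU, in which case it must stay accessible
	 * until the grace period ends. A true return means KASAN
	 * quarantined the object and it must not be reused yet.
	 */
	if (__kasan_slab_free(s, object, init,
			      s->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	return true;
}
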
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 7b32be2a3cf0..567d33b493e2 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -996,6 +996,51 @@ static void kmem_cache_invalid_free(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+	*p = 1;
+
+	rcu_read_lock();
+
+	/* Free the object - this will internally schedule an RCU callback. */
+	kmem_cache_free(cache, p);
+
+	/*
+	 * We should still be allowed to access the object at this point because
+	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+	 * critical section since before the kmem_cache_free().
+	 */
+	READ_ONCE(*p);
+
+	rcu_read_unlock();
+
+	/*
+	 * Wait for the RCU callback to execute; after this, the object should
+	 * have actually been freed from KASAN's perspective.
+	 */
+	rcu_barrier();
+
+	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+	kmem_cache_destroy(cache);
+}
+
 static void empty_cache_ctor(void *object) { }
 
 static void kmem_cache_double_destroy(struct kunit *test)
@@ -1937,6 +1982,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmem_cache_oob),
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_rcu_uaf),
 	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),
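
The new kmem_cache_rcu_uaf test encodes the standard SLAB_TYPESAFE_BY_RCU contract: an object stays type-stable and accessible inside an RCU read-side critical section that began before the free, and only becomes invalid once a grace period has elapsed. A rough reader-side sketch of that contract, with a made-up struct foo purely for illustration:

/*
 * Hypothetical SLAB_TYPESAFE_BY_RCU reader; struct foo and its field
 * are illustrative and do not appear in this patch.
 */
struct foo { int val; };

static int read_foo(struct foo *p)
{
	int val;

	rcu_read_lock();
	/*
	 * Legal even if another CPU frees p right now: the memory stays
	 * type-stable until an RCU grace period has elapsed. This is the
	 * window in which the test's first READ_ONCE() must not be
	 * reported. After the grace period (rcu_barrier() in the test),
	 * the same access is a genuine use-after-free that KASAN with
	 * CONFIG_SLUB_RCU_DEBUG is expected to catch.
	 */
	val = READ_ONCE(p->val);
	rcu_read_unlock();

	return val;
}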