-rw-r--r--  kernel/rcu/tree.c    9
-rw-r--r--  lib/slub_kunit.c    18
-rw-r--r--  mm/slab.h            8
-rw-r--r--  mm/slab_common.c     5
-rw-r--r--  mm/slub.c            5
5 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a60616e69b66..b1f883fcd918 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3607,11 +3607,12 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
}
// One work is per one batch, so there are three
- // "free channels", the batch can handle. It can
- // be that the work is in the pending state when
- // channels have been detached following by each
- // other.
+ // "free channels", the batch can handle. Break
+ // the loop since it is done with this CPU thus
+ // queuing an RCU work is _always_ success here.
queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
+ WARN_ON_ONCE(!queued);
+ break;
}
}
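The WARN_ON_ONCE() above documents an invariant established earlier in the loop: queue_rcu_work() returns false only when the work item is already queued, and any krwp whose work is still pending was skipped via need_wait_for_krwp_work(). A condensed sketch of the resulting control flow (detach logic and locking elided; names follow the hunk above, not a verbatim copy):

/*
 * Condensed sketch of kvfree_rcu_queue_batch() after this change.
 * Any krwp with in-flight RCU work was skipped earlier, so the work
 * item handed to queue_rcu_work() below is idle and queuing can only
 * succeed (queue_rcu_work() fails only for already-queued work).
 */
static bool kvfree_rcu_queue_batch_sketch(struct kfree_rcu_cpu *krcp)
{
	bool queued = false;
	int i;

	for (i = 0; i < KFREE_N_BATCHES; i++) {
		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);

		if (need_wait_for_krwp_work(krwp))
			continue;	/* this batch is still in flight */

		if (need_offload_krc(krcp)) {
			/* ... detach bulk/head channels into krwp ... */
			queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
			WARN_ON_ONCE(!queued);	/* must hold: the work is idle */
			break;			/* one batch queued; done with this CPU */
		}
	}

	return queued;
}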
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 6e3a1e5a7142..80e39f003344 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -164,10 +164,16 @@ struct test_kfree_rcu_struct {
static void test_kfree_rcu(struct kunit *test)
{
- struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
- sizeof(struct test_kfree_rcu_struct),
- SLAB_NO_MERGE);
- struct test_kfree_rcu_struct *p = kmem_cache_alloc(s, GFP_KERNEL);
+ struct kmem_cache *s;
+ struct test_kfree_rcu_struct *p;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+ p = kmem_cache_alloc(s, GFP_KERNEL);
kfree_rcu(p, rcu);
kmem_cache_destroy(s);
@@ -177,13 +183,13 @@ static void test_kfree_rcu(struct kunit *test)
static void test_leak_destroy(struct kunit *test)
{
- struct kmem_cache *s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
64, SLAB_NO_MERGE);
kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_destroy(s);
- KUNIT_EXPECT_EQ(test, 1, slab_errors);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
}
static int test_init(struct kunit *test)
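Two test-suite fixes are visible above. First, test_kfree_rcu() is skipped when the suite is built into the kernel; per the skip message, kfree_rcu() cannot be exercised in that configuration. Second, test_leak_destroy() gets its own cache name, fixing an apparent copy-paste of "TestSlub_kfree_rcu", and now expects two recorded errors rather than one, matching the extra per-object error added in mm/slub.c below. The skip idiom itself generalizes; a minimal, hypothetical example (the test name and body are illustrative, not from this patch):

#include <linux/kconfig.h>
#include <kunit/test.h>

/*
 * Hypothetical test showing the skip idiom used above: IS_BUILTIN()
 * is true only for =y builds, so the body runs only when the suite
 * is built as a module.
 */
static void example_module_only_test(struct kunit *test)
{
	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
		kunit_skip(test, "requires a modular build");

	KUNIT_EXPECT_TRUE(test, true);	/* placeholder body */
}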
diff --git a/mm/slab.h b/mm/slab.h
index f22fb760b286..6c6fe6d630ce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -310,7 +310,7 @@ struct kmem_cache {
};
#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
+#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
@@ -546,6 +546,12 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
return false;
}
+#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
+bool slab_in_kunit_test(void);
+#else
+static inline bool slab_in_kunit_test(void) { return false; }
+#endif
+
#ifdef CONFIG_SLAB_OBJ_EXT
/*
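Giving SLAB_SUPPORTS_SYSFS the value 1, rather than defining it empty, matters for the kernel's IS_ENABLED()/__is_defined() helpers, which detect macros that expand to exactly 1; with the old empty definition such a check would evaluate to 0 even though the macro is defined. This motivation is inferred from the macro machinery, not stated in the hunk. A sketch:

#include <linux/kconfig.h>

/*
 * Sketch (inferred motivation, not from the patch): with
 * "#define SLAB_SUPPORTS_SYSFS 1" the macro can be tested in ordinary
 * C conditions instead of #ifdef blocks. IS_ENABLED() resolves to 1
 * only for macros expanding to 1, so the empty definition would have
 * made this branch dead.
 */
static void example_release(struct kmem_cache *s)
{
	if (IS_ENABLED(SLAB_SUPPORTS_SYSFS))
		sysfs_slab_release(s);	/* stubbed out when sysfs is off */
}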
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 744324465615..3d26c257ed8b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -508,8 +508,9 @@ void kmem_cache_destroy(struct kmem_cache *s)
kasan_cache_shutdown(s);
err = __kmem_cache_shutdown(s);
- WARN(err, "%s %s: Slab cache still has objects when called from %pS",
- __func__, s->name, (void *)_RET_IP_);
+ if (!slab_in_kunit_test())
+ WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+ __func__, s->name, (void *)_RET_IP_);
list_del(&s->list);
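The guard keeps kmem_cache_destroy() from emitting a real WARN(), and thereby tainting the kernel, when the leak is provoked deliberately by a kunit test; in that case the failure is counted through the kunit machinery instead. The mm/slub.c hunk below only un-statics the existing helper, so as a reference, slab_in_kunit_test() plausibly mirrors slab_add_kunit_errors() minus the counter increment. A sketch modeled on that pattern, not copied from this diff:

#include <linux/sched.h>
#include <kunit/test-bug.h>
#include <kunit/resource.h>

/*
 * Sketch of slab_in_kunit_test(), modeled on slab_add_kunit_errors()
 * in mm/slub.c (an assumption, not taken from the hunks): report true
 * only when the current task runs a kunit test that registered the
 * "slab_errors" resource.
 */
bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test,
					     "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}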
diff --git a/mm/slub.c b/mm/slub.c
index 21f71cb6cc06..5b832512044e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -827,7 +827,7 @@ static bool slab_add_kunit_errors(void)
return true;
}
-static bool slab_in_kunit_test(void)
+bool slab_in_kunit_test(void)
{
struct kunit_resource *resource;
@@ -843,7 +843,6 @@ static bool slab_in_kunit_test(void)
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
-static inline bool slab_in_kunit_test(void) { return false; }
#endif
static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -5436,6 +5435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
for_each_object(p, s, addr, slab->objects) {
if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
+ if (slab_add_kunit_errors())
+ continue;
pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
print_tracking(s, p);
}