Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c  47
1 file changed, 4 insertions(+), 43 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a079b8540334..c40227d5fa07 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -40,11 +40,6 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
-static LIST_HEAD(slab_caches_to_rcu_destroy);
-static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
-static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
-		    slab_caches_to_rcu_destroy_workfn);
-
 /*
  * Set of flags that will prevent slab merging
  */
@@ -499,33 +494,6 @@ static void kmem_cache_release(struct kmem_cache *s)
 	slab_kmem_cache_release(s);
 }
 
-static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
-{
-	LIST_HEAD(to_destroy);
-	struct kmem_cache *s, *s2;
-
-	/*
-	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
-	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
-	 * through RCU and the associated kmem_cache are dereferenced
-	 * while freeing the pages, so the kmem_caches should be freed only
-	 * after the pending RCU operations are finished. As rcu_barrier()
-	 * is a pretty slow operation, we batch all pending destructions
-	 * asynchronously.
-	 */
-	mutex_lock(&slab_mutex);
-	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
-	mutex_unlock(&slab_mutex);
-
-	if (list_empty(&to_destroy))
-		return;
-
-	rcu_barrier();
-
-	list_for_each_entry_safe(s, s2, &to_destroy, list)
-		kmem_cache_release(s);
-}
-
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
 	__kmem_cache_release(s);
@@ -535,7 +503,6 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	bool rcu_set;
 	int err;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
@@ -551,8 +518,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		return;
 	}
 
-	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
-
 	/* free asan quarantined objects */
 	kasan_cache_shutdown(s);
 
@@ -572,14 +537,10 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (err)
 		return;
 
-	if (rcu_set) {
-		mutex_lock(&slab_mutex);
-		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
-		schedule_work(&slab_caches_to_rcu_destroy_work);
-		mutex_unlock(&slab_mutex);
-	} else {
-		kmem_cache_release(s);
-	}
+	if (s->flags & SLAB_TYPESAFE_BY_RCU)
+		rcu_barrier();
+
+	kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
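
The upshot of this change is that kmem_cache_destroy() becomes synchronous for SLAB_TYPESAFE_BY_RCU caches: instead of queuing the cache on the slab_caches_to_rcu_destroy list and deferring the release to a work item, it waits for in-flight RCU callbacks with rcu_barrier() and releases the cache directly. A minimal sketch of a module exercising this path (demo_cache and demo_obj are hypothetical names for illustration, not part of this commit):

#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int val;
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	/* Objects in this cache stay type-stable across RCU grace periods. */
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, SLAB_TYPESAFE_BY_RCU, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	/*
	 * With this patch, kmem_cache_destroy() itself runs rcu_barrier()
	 * for SLAB_TYPESAFE_BY_RCU caches before freeing them, so no
	 * asynchronous destruction work is pending once this returns.
	 */
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off is visible in the removed comment: rcu_barrier() is slow, which is why destructions used to be batched asynchronously; after this change each SLAB_TYPESAFE_BY_RCU destroy pays that cost inline.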