author     Vlastimil Babka <vbabka@suse.cz>  2022-08-23 19:04:00 +0200
committer  Vlastimil Babka <vbabka@suse.cz>  2022-09-17 00:18:35 +0200
commit     5875e59828a026e47f37c5e343f4fe8e0ba023b9 (patch)
tree       915c179f37bb1fe6cd3c22c7e7954e8f614461c4 /mm/slub.c
parent     mm/slub: convert object_map_lock to non-raw spinlock (diff)
mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()
The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab() (through
slab_[un]lock()) is unnecessary as bit_spin_lock() disables preemption and
that's sufficient on PREEMPT_RT, where no allocation/free operation is
performed in hardirq context and so can't interrupt the current operation.

That means we no longer need the slab_[un]lock() wrappers, so delete them and
rename the current __slab_[un]lock() to slab_[un]lock().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
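For orientation, a condensed sketch of the locking helpers as they read after
this patch, assembled from the hunks in the diff below (context lines that the
hunks skip over are omitted): the per-slab lock is simply the PG_locked bit
spinlock on the slab's page, and the preempt_disable() implied by
bit_spin_lock() is all that PREEMPT_RT needs here.

    /*
     * Per slab locking using the pagelock: bit_spin_lock() disables
     * preemption, which is sufficient even on PREEMPT_RT because no
     * allocation/free operation runs in hardirq context.
     */
    static __always_inline void slab_lock(struct slab *slab)
    {
    	struct page *page = slab_page(slab);

    	bit_spin_lock(PG_locked, &page->flags);
    }

    static __always_inline void slab_unlock(struct slab *slab)
    {
    	struct page *page = slab_page(slab);

    	__bit_spin_unlock(PG_locked, &page->flags);
    }

The cmpxchg_double_slab() variant in the last hunk still brackets slab_lock()
with local_irq_save()/local_irq_restore(), since its callers may reach it with
interrupts enabled; __cmpxchg_double_slab() continues to rely on the caller
having disabled interrupts (or, on PREEMPT_RT, on preemption being off).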
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	39
1 file changed, 12 insertions(+), 27 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e0759b5fe506..d58f19502a92 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
/*
* Per slab locking using the pagelock
*/
-static __always_inline void __slab_lock(struct slab *slab)
+static __always_inline void slab_lock(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
bit_spin_lock(PG_locked, &page->flags);
}
-static __always_inline void __slab_unlock(struct slab *slab)
+static __always_inline void slab_unlock(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
__bit_spin_unlock(PG_locked, &page->flags);
}
-static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
-{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_save(*flags);
- __slab_lock(slab);
-}
-
-static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
-{
- __slab_unlock(slab);
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_restore(*flags);
-}
-
/*
* Interrupts must be disabled (for the fallback code to work right), typically
- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
- * so we disable interrupts as part of slab_[un]lock().
+ * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
+ * part of bit_spin_lock(), is sufficient because the policy is not to allow any
+ * allocation/ free operation in hardirq context. Therefore nothing can
+ * interrupt the operation.
*/
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
void *freelist_old, unsigned long counters_old,
@@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
} else
#endif
{
- /* init to 0 to prevent spurious warnings */
- unsigned long flags = 0;
-
- slab_lock(slab, &flags);
+ slab_lock(slab);
if (slab->freelist == freelist_old &&
slab->counters == counters_old) {
slab->freelist = freelist_new;
slab->counters = counters_new;
- slab_unlock(slab, &flags);
+ slab_unlock(slab);
return true;
}
- slab_unlock(slab, &flags);
+ slab_unlock(slab);
}
cpu_relax();
@@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
unsigned long flags;
local_irq_save(flags);
- __slab_lock(slab);
+ slab_lock(slab);
if (slab->freelist == freelist_old &&
slab->counters == counters_old) {
slab->freelist = freelist_new;
slab->counters = counters_new;
- __slab_unlock(slab);
+ slab_unlock(slab);
local_irq_restore(flags);
return true;
}
- __slab_unlock(slab);
+ slab_unlock(slab);
local_irq_restore(flags);
}
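For context, callers elsewhere in mm/slub.c typically retry the whole
read-modify-write when the cmpxchg-style update fails. A simplified,
illustrative sketch of that pattern (variable names and the "example" tag are
placeholders, not part of this patch):

    /* Re-read freelist/counters and retry until the update succeeds. */
    do {
    	freelist_old = slab->freelist;
    	counters_old = slab->counters;
    	/* ... derive freelist_new / counters_new from the old values ... */
    } while (!__cmpxchg_double_slab(s, slab, freelist_old, counters_old,
    				freelist_new, counters_new, "example"));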