author    Vlastimil Babka <vbabka@suse.cz>  2021-10-26 13:39:14 +0200
committer Vlastimil Babka <vbabka@suse.cz>  2022-01-06 12:26:01 +0100
commit    0393895b091227e8a77dfd5e6a6ab61bd11b8df0 (patch)
tree      301a3de9914cac0dae2971692ff7300f023648f6
parent    mm/slub: Convert kfree() to use a struct slab (diff)
mm/slub: Convert __slab_lock() and __slab_unlock() to struct slab
These functions operate on the PG_locked page flag, but make them accept
struct slab to encapsulate this implementation detail.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
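The conversion relies on the slab_page() and page_slab() helpers introduced
earlier in this series. As a rough guide only, a minimal sketch of what they
amount to, assuming struct slab is a typed overlay of struct page (these
simplified definitions are illustrative, not the exact mm/slab.h ones):

    static inline struct page *slab_page(const struct slab *slab)
    {
        /* struct slab shares the memory of its struct page */
        return (struct page *)slab;
    }

    static inline struct slab *page_slab(const struct page *page)
    {
        /* inverse conversion, valid only for slab pages */
        return (struct slab *)page;
    }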
-rw-r--r--  mm/slub.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index acf2608a57c5..14550e7bee71 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -440,14 +440,18 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
/*
* Per slab locking using the pagelock
*/
-static __always_inline void __slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct slab *slab)
{
+ struct page *page = slab_page(slab);
+
VM_BUG_ON_PAGE(PageTail(page), page);
bit_spin_lock(PG_locked, &page->flags);
}
-static __always_inline void __slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct slab *slab)
{
+ struct page *page = slab_page(slab);
+
VM_BUG_ON_PAGE(PageTail(page), page);
__bit_spin_unlock(PG_locked, &page->flags);
}
@@ -456,12 +460,12 @@ static __always_inline void slab_lock(struct page *page, unsigned long *flags)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_save(*flags);
- __slab_lock(page);
+ __slab_lock(page_slab(page));
}
static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
{
- __slab_unlock(page);
+ __slab_unlock(page_slab(page));
if (IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_restore(*flags);
}
@@ -530,16 +534,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
unsigned long flags;
local_irq_save(flags);
- __slab_lock(page);
+ __slab_lock(page_slab(page));
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
page->counters = counters_new;
- __slab_unlock(page);
+ __slab_unlock(page_slab(page));
local_irq_restore(flags);
return true;
}
- __slab_unlock(page);
+ __slab_unlock(page_slab(page));
local_irq_restore(flags);
}