| author | Vlastimil Babka <vbabka@suse.cz> | 2021-05-12 13:59:58 +0200 |
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2021-09-04 01:12:21 +0200 |
| commit | 3406e91bce47383f03fe839f02f7f4bef78c832c (patch) | |
| tree | 335e297061f43a5a920b7197fddfa74805002688 /mm/slub.c | |
| parent | mm, slub: move reset of c->page and freelist out of deactivate_slab() (diff) | |
| download | linux-3406e91bce47383f03fe839f02f7f4bef78c832c.tar.xz linux-3406e91bce47383f03fe839f02f7f4bef78c832c.zip | |
mm, slub: make locking in deactivate_slab() irq-safe
deactivate_slab() no longer touches the kmem_cache_cpu structure, so it will be
possible to call it with irqs enabled. Just convert the spin_lock calls to
their irq saving/restoring variants to make it irq-safe.
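As a rough illustration of that conversion (a minimal sketch with a hypothetical node structure, not the actual SLUB code), the irq saving/restoring variants stash the current interrupt state in a local `flags` variable and disable local interrupts, so the same critical section is safe whether the caller arrives with irqs enabled or disabled:

```c
/*
 * Minimal sketch of the spin_lock() -> spin_lock_irqsave() conversion.
 * example_node and example_move_to_partial() are hypothetical; they only
 * mirror the shape of the list_lock usage in deactivate_slab().
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_node {
	spinlock_t list_lock;
	struct list_head partial;
};

static void example_move_to_partial(struct example_node *n, struct list_head *entry)
{
	unsigned long flags;

	/* Before: spin_lock(&n->list_lock); -- assumed irqs were already off */
	spin_lock_irqsave(&n->list_lock, flags);	/* disable irqs, remember prior state */
	list_add_tail(entry, &n->partial);
	spin_unlock_irqrestore(&n->list_lock, flags);	/* restore the saved irq state */
}
```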
Note that we now have to use cmpxchg_double_slab() for an irq-safe slab_lock(), because
in some situations we don't take the list_lock, which is what would otherwise disable irqs.
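The difference between the two helpers, roughly: the double-underscore variant expects the caller to have interrupts disabled already, while the plain variant disables and restores them itself around the page bit-lock, which is what deactivate_slab() needs once it may run with irqs on. Below is a hedged sketch of that idiom with hypothetical names; the real cmpxchg_double_slab()/__cmpxchg_double_slab() in mm/slub.c also try a lockless cmpxchg-double first when the cache supports it.

```c
/*
 * Hypothetical sketch of the two-variant locking idiom; example_update()
 * and __example_update() are illustrative names, not the mm/slub.c bodies.
 */
#include <linux/bit_spinlock.h>
#include <linux/irqflags.h>
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>

/* Caller must already have interrupts disabled (like __cmpxchg_double_slab()). */
static bool __example_update(struct page *page, void *old, void *new)
{
	bool ret = false;

	lockdep_assert_irqs_disabled();
	bit_spin_lock(PG_locked, &page->flags);		/* the slab_lock() bit lock */
	if (page->freelist == old) {
		page->freelist = new;
		ret = true;
	}
	__bit_spin_unlock(PG_locked, &page->flags);
	return ret;
}

/* Safe to call with interrupts enabled (like cmpxchg_double_slab()). */
static bool example_update(struct page *page, void *old, void *new)
{
	unsigned long flags;
	bool ret;

	local_irq_save(flags);			/* make the bit-lock section irq-safe */
	ret = __example_update(page, old, new);
	local_irq_restore(flags);
	return ret;
}
```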
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 9 |
1 file changed, 5 insertions, 4 deletions
```diff
diff --git a/mm/slub.c b/mm/slub.c
index cea7a2ad9e3e..6deb4080ef54 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2223,6 +2223,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 	enum slab_modes l = M_NONE, m = M_NONE;
 	void *nextfree, *freelist_iter, *freelist_tail;
 	int tail = DEACTIVATE_TO_HEAD;
+	unsigned long flags = 0;
 	struct page new;
 	struct page old;
 
@@ -2298,7 +2299,7 @@ redo:
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			spin_lock_irqsave(&n->list_lock, flags);
 		}
 	} else {
 		m = M_FULL;
@@ -2309,7 +2310,7 @@ redo:
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			spin_lock_irqsave(&n->list_lock, flags);
 		}
 	}
 
@@ -2326,14 +2327,14 @@ redo:
 	}
 
 	l = m;
-	if (!__cmpxchg_double_slab(s, page,
+	if (!cmpxchg_double_slab(s, page,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab"))
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 	if (m == M_PARTIAL)
 		stat(s, tail);
```