author     Christoph Lameter <cl@linux.com>    2012-05-09 17:09:53 +0200
committer  Pekka Enberg <penberg@kernel.org>   2012-06-01 08:25:40 +0200
commit     7ced3719719669ad6bd279b45fa3c1a517b2e057 (patch)
tree       b50ad0437b1bff8e772a53f0ffdb7392f501ea39 /mm/slub.c
parent     slub: Add frozen check in __slab_alloc (diff)
slub: Acquire_slab() avoid loop
Avoid the loop in acquire_slab() and simply fail if there is a conflict.
This will cause the next page on the list to be considered.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
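To make the before/after control flow easier to follow outside the kernel, below is a minimal, self-contained C11 sketch of the same pattern change: a retry loop around a compare-and-exchange versus a single attempt that reports failure to the caller. The names struct slab_state, acquire_retrying() and acquire_once() are hypothetical, and the sketch only models the freelist half of what __cmpxchg_double_slab() swaps atomically (the real helper also compares and updates the page's counters word); it illustrates the pattern, not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-page state; the kernel compares and
 * swaps page->freelist and page->counters together. */
struct slab_state {
        _Atomic(void *) freelist;
};

/* Old pattern: spin until the compare-and-exchange succeeds. */
void *acquire_retrying(struct slab_state *s)
{
        void *old;

        do {
                old = atomic_load(&s->freelist);
        } while (!atomic_compare_exchange_weak(&s->freelist, &old, NULL));

        return old;
}

/* New pattern: try once; on a conflict return NULL and let the caller
 * move on, e.g. to the next page on the partial list. */
void *acquire_once(struct slab_state *s)
{
        void *old = atomic_load(&s->freelist);

        if (!atomic_compare_exchange_strong(&s->freelist, &old, NULL))
                return NULL;

        return old;
}

int main(void)
{
        int object = 42;
        struct slab_state s;

        atomic_init(&s.freelist, &object);
        printf("acquired freelist: %p\n", acquire_once(&s));
        printf("state afterwards:  %p\n", atomic_load(&s.freelist));
        return 0;
}

Per the commit message, letting the one-shot attempt fail under contention is cheap here because acquire_slab() is called with list_lock held, and the caller can simply consider the next page on the partial list rather than spinning on a contended one.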
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a3395c28f561..9892775349bf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
  *
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct page *page,
@@ -1510,22 +1510,24 @@ static inline void *acquire_slab(struct kmem_cache *s,
          * The old freelist is the list of objects for the
          * per cpu allocation list.
          */
-        do {
-                freelist = page->freelist;
-                counters = page->counters;
-                new.counters = counters;
-                if (mode)
-                        new.inuse = page->objects;
+        freelist = page->freelist;
+        counters = page->counters;
+        new.counters = counters;
+        if (mode)
+                new.inuse = page->objects;
 
-                VM_BUG_ON(new.frozen);
-                new.frozen = 1;
+        VM_BUG_ON(new.frozen);
+        new.frozen = 1;
 
-        } while (!__cmpxchg_double_slab(s, page,
+        if (!__cmpxchg_double_slab(s, page,
                         freelist, counters,
                         NULL, new.counters,
-                        "lock and freeze"));
+                        "acquire_slab"))
+
+                return NULL;
 
         remove_partial(n, page);
+        WARN_ON(!freelist);
         return freelist;
 }