diff options
author | Christoph Lameter <clameter@sgi.com> | 2008-04-14 18:11:41 +0200 |
---|---|---|
committer | Pekka Enberg <penberg@cs.helsinki.fi> | 2008-04-27 17:28:18 +0200 |
commit | 31d33baf36bda7a2fea800648d87c9fe6155e7ca (patch) | |
tree | d84cdc6976f1417958a5b613541e216b9b6fbf16 | |
parent | slub: Make the order configurable for each slab cache (diff) | |
download | linux-31d33baf36bda7a2fea800648d87c9fe6155e7ca.tar.xz linux-31d33baf36bda7a2fea800648d87c9fe6155e7ca.zip |
slub: Simplify any_slab_object checks
Since we now have a total_objects counter per node, use that to
check for the presence of any objects. The loop over all cpu slabs
is not that useful, since any cpu slab would require an object allocation
first; so drop that loop.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r-- | mm/slub.c | 10 |
1 file changed, 1 insertion, 9 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 23a2683d6c9f..06533f342be0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3775,14 +3775,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 static int any_slab_objects(struct kmem_cache *s)
 {
 	int node;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c && c->page)
-			return 1;
-	}
 
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3790,7 +3782,7 @@ static int any_slab_objects(struct kmem_cache *s)
 		if (!n)
 			continue;
 
-		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
+		if (atomic_read(&n->total_objects))
 			return 1;
 	}
 	return 0;