summary | refs | log | tree | commit | diff | stats
path: root/mm/slub.c
diff options
context:
space:
mode:
author: Alex Shi <alex.shi@intel.com> 2011-09-07 04:26:36 +0200
committer: Pekka Enberg <penberg@kernel.org> 2011-09-13 19:41:25 +0200
commit: 12d79634f8d7af5229b7d21143d50e7cf7d94177 (patch)
tree: 06096e3fdcc4c7fe84b9111d2f6274514bfb72b0 /mm/slub.c
parent: slub: update slabinfo tools to report per cpu partial list statistics (diff)
download: linux-12d79634f8d7af5229b7d21143d50e7cf7d94177.tar.xz
download: linux-12d79634f8d7af5229b7d21143d50e7cf7d94177.zip
slub: Code optimization in get_partial_node()
I found a way to remove a variable in get_partial_node(). That is also helpful for code understanding.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- mm/slub.c | 6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0e286acef62a..4982fb5c91de 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1600,7 +1600,6 @@ static void *get_partial_node(struct kmem_cache *s,
{
struct page *page, *page2;
void *object = NULL;
- int count = 0;
/*
* Racy check. If we mistakenly see no partial slabs then we
@@ -1613,17 +1612,16 @@ static void *get_partial_node(struct kmem_cache *s,
spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
- void *t = acquire_slab(s, n, page, count == 0);
+ void *t = acquire_slab(s, n, page, object == NULL);
int available;
if (!t)
break;
- if (!count) {
+ if (!object) {
c->page = page;
c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
- count++;
object = t;
available = page->objects - page->inuse;
} else {