author		Christoph Lameter <clameter@sgi.com>	2007-05-06 23:49:42 +0200
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 21:12:54 +0200
commit		643b113849d8faa68c9f01c3c9d929bfbffd50bd
tree		d8eea2326ccee49892acaa970bf015ee69a31e8a /mm/slub.c
parent		slub: fix object tracking
slub: enable tracking of full slabs
If slab tracking is on, then build a list of full slabs so that we can
verify the integrity of all slabs and are also able to build lists of
alloc/free callers.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
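As an illustration of what this list enables, here is a minimal sketch of a
debug pass that walks one node's full list and checks every slab on it. This
is not code from the commit: validate_full_slabs() and validate_slab() are
hypothetical names; only the n->full list, the page->lru chaining, the
SLAB_STORE_USER gate, and the list_lock protection are taken from the diff
below.

/*
 * Sketch only (not part of this patch): walk the per-node list of
 * full slabs that add_full() maintains.  Full slabs are chained
 * through page->lru; the list is only kept up to date when
 * SLAB_STORE_USER tracking is enabled.  validate_slab() is a
 * hypothetical integrity checker, not something this commit adds.
 */
static void validate_full_slabs(struct kmem_cache *s,
				struct kmem_cache_node *n)
{
	struct page *page;
	unsigned long flags;

	if (!(s->flags & SLAB_STORE_USER))
		return;		/* list is not maintained without tracking */

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->full, lru)
		validate_slab(s, page);
	spin_unlock_irqrestore(&n->list_lock, flags);
}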
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index cfc5301afe42..c4f40d373d1e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	VM_BUG_ON(!irqs_disabled());
+
+	VM_BUG_ON(!irqs_disabled());
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n;
+
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
+	n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 							void *object)
 {
@@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else if (PageError(page))
+			add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1302,7 +1338,7 @@ out_unlock:
slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
@@ -1314,6 +1350,8 @@ slab_empty:
debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (!PageActive(page) && !page->freelist)
+		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
 	goto checks_ok;
@@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
#ifdef CONFIG_NUMA