author		Vlastimil Babka <vbabka@suse.cz>	2021-05-23 01:37:07 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2021-09-03 23:56:49 +0200
commit		0a19e7dd928800da66efe429c25b0adc3a07c534
tree		69c7593e7d2a2d5aaf7a4bb4d1d232441716c680
parent		mm, slub: allocate private object map for debugfs listings
mm, slub: allocate private object map for validate_slab_cache()
validate_slab_cache() is called either to handle a sysfs write or from a
self-test context. Neither is an atomic context, so it's straightforward to
preallocate a private object bitmap instead of grabbing the shared static
one meant for critical sections. Let's do that.
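For illustration, a minimal userspace sketch of the allocate-once-and-pass-down
pattern applied here (not kernel code: validate_page(), validate_node() and
MAX_OBJECTS are made-up stand-ins for validate_slab(), validate_slab_node()
and oo_objects(s->oo)):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_OBJECTS 512				/* stand-in for oo_objects(s->oo) */
#define MAP_LONGS (MAX_OBJECTS / (8 * sizeof(unsigned long)))

static void validate_page(int page, unsigned long *obj_map)
{
	/*
	 * The leaf reuses the caller's map: the analogue of __fill_map()
	 * writing into obj_map, instead of get_map()/put_map() on a
	 * shared static map.
	 */
	memset(obj_map, 0, MAP_LONGS * sizeof(unsigned long));
	printf("page %d checked against private map\n", page);
}

static long validate_node(int node, unsigned long *obj_map)
{
	long count = 0;

	printf("node %d:\n", node);
	for (int page = 0; page < 2; page++) {	/* pretend per-node page list */
		validate_page(page, obj_map);
		count++;
	}
	return count;
}

int main(void)
{
	/* one private allocation up front, like bitmap_alloc(..., GFP_KERNEL) */
	unsigned long *obj_map = calloc(MAP_LONGS, sizeof(unsigned long));
	long count = 0;

	if (!obj_map)
		return ENOMEM;			/* the patch returns -ENOMEM here */

	for (int node = 0; node < 2; node++)	/* pretend NUMA nodes */
		count += validate_node(node, obj_map);

	free(obj_map);				/* mirrors bitmap_free(obj_map) */
	printf("validated %ld pages\n", count);
	return 0;
}

The ownership change is the point: the map's lifetime is tied to the single
top-level caller, so the leaf functions need no locking or shared state.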
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
 mm/slub.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index fb603fdf58cb..4697280130f2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4679,11 +4679,11 @@ static int count_total(struct page *page)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab(struct kmem_cache *s, struct page *page,
+			  unsigned long *obj_map)
 {
 	void *p;
 	void *addr = page_address(page);
-	unsigned long *map;
 
 	slab_lock(page);
 
@@ -4691,21 +4691,20 @@ static void validate_slab(struct kmem_cache *s, struct page *page)
 		goto unlock;
 
 	/* Now we know that a valid freelist exists */
-	map = get_map(s, page);
+	__fill_map(obj_map, s, page);
 	for_each_object(p, s, addr, page->objects) {
-		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
+		u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
 		if (!check_object(s, page, p, val))
			break;
 	}
-	put_map(map);
 unlock:
 	slab_unlock(page);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-		struct kmem_cache_node *n)
+		struct kmem_cache_node *n, unsigned long *obj_map)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -4714,7 +4713,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
-		validate_slab(s, page);
+		validate_slab(s, page, obj_map);
 		count++;
 	}
 	if (count != n->nr_partial) {
@@ -4727,7 +4726,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		goto out;
 
 	list_for_each_entry(page, &n->full, slab_list) {
-		validate_slab(s, page);
+		validate_slab(s, page, obj_map);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs)) {
@@ -4746,10 +4745,17 @@ long validate_slab_cache(struct kmem_cache *s)
 	int node;
 	unsigned long count = 0;
 	struct kmem_cache_node *n;
+	unsigned long *obj_map;
+
+	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+	if (!obj_map)
+		return -ENOMEM;
 
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n)
-		count += validate_slab_node(s, n);
+		count += validate_slab_node(s, n, obj_map);
+
+	bitmap_free(obj_map);
 
 	return count;
 }