path: root/mm/slub.c
author	Xiongwei Song <xiongwei.song@windriver.com>	2023-04-13 16:34:52 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2023-05-22 15:20:21 +0200
commit	8040cbf5e1da2fe86558239b92927a947c79ecd6 (patch)
tree	117c0a6a9463eff9b69ecb38379603c992f20e6c /mm/slub.c
parent	slub: Remove slabs_node() function (diff)
download	linux-8040cbf5e1da2fe86558239b92927a947c79ecd6.tar.xz
	linux-8040cbf5e1da2fe86558239b92927a947c79ecd6.zip
slub: Don't read nr_slabs and total_objects directly
We have node_nr_slabs() to read nr_slabs and node_nr_objs() to read total_objects in a kmem_cache_node, so there is no need to access the two members directly.

Signed-off-by: Xiongwei Song <xiongwei.song@windriver.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
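For context, the two accessors this patch switches to are thin inline wrappers over the same per-node counters. A minimal sketch of how they are defined in mm/slub.c under CONFIG_SLUB_DEBUG (paraphrased here for reference; not part of this patch) looks like:

/*
 * Accessors for the per-node SLUB debug counters. Callers use these
 * instead of reading the atomic_long_t fields directly, so the counter
 * representation stays private to the accounting code.
 */
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}

Funneling every reader through these helpers means the way nr_slabs and total_objects are stored can change later without touching each call site, which is presumably the point of removing the remaining direct reads below.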
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 53be9a208bb6..58cc832d0afd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5156,9 +5156,9 @@ static int validate_slab_node(struct kmem_cache *s,
 		validate_slab(s, slab, obj_map);
 		count++;
 	}
-	if (count != atomic_long_read(&n->nr_slabs)) {
+	if (count != node_nr_slabs(n)) {
 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
-		       s->name, count, atomic_long_read(&n->nr_slabs));
+		       s->name, count, node_nr_slabs(n));
 		slab_add_kunit_errors();
 	}
@@ -5442,12 +5442,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_kmem_cache_node(s, node, n) {
 			if (flags & SO_TOTAL)
-				x = atomic_long_read(&n->total_objects);
+				x = node_nr_objs(n);
 			else if (flags & SO_OBJECTS)
-				x = atomic_long_read(&n->total_objects) -
-					count_partial(n, count_free);
+				x = node_nr_objs(n) - count_partial(n, count_free);
 			else
-				x = atomic_long_read(&n->nr_slabs);
+				x = node_nr_slabs(n);
 			total += x;
 			nodes[node] += x;
 		}
@@ -6386,7 +6385,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 		unsigned long flags;
 		struct slab *slab;
-		if (!atomic_long_read(&n->nr_slabs))
+		if (!node_nr_slabs(n))
 			continue;
 		spin_lock_irqsave(&n->list_lock, flags);