author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-03-15 22:54:53 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-16 00:55:16 +0100
commit		70f75067b15659bb03404e75eded41011c67dc57 (patch)
tree		62e7fc5d7802b82d1953be3b884a36aa860cbd49 /mm/slab.c
parent		mm/slab: introduce new slab management type, OBJFREELIST_SLAB (diff)
mm/slab: avoid returning values by reference
Returning values by reference is bad practice. Instead, just use the function return value.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Suggested-by: Christoph Lameter <cl@linux.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
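For illustration only (this sketch is not part of the commit): the hypothetical helpers count_old() and count_new() below stand in for cache_estimate() and show the pattern the patch applies, moving the primary result from an out-parameter to the return value while keeping the secondary left-over value by reference.

#include <stddef.h>
#include <stdio.h>

/* Out-parameter style: the primary result is returned by reference,
 * so every caller must declare a variable and pass its address. */
static void count_old(size_t total, size_t item, size_t *left_over, unsigned int *num)
{
	*num = total / item;
	*left_over = total % item;
}

/* Value-return style, analogous to what the patch does for cache_estimate():
 * the primary result comes back as the return value; only the secondary
 * left-over value remains an out-parameter. */
static unsigned int count_new(size_t total, size_t item, size_t *left_over)
{
	*left_over = total % item;
	return total / item;
}

int main(void)
{
	size_t rem;
	unsigned int num;

	count_old(4096, 100, &rem, &num);	/* num = 40, rem = 96 */
	num = count_new(4096, 100, &rem);	/* same result, simpler call site */
	printf("%u objects, %zu bytes left over\n", num, rem);
	return 0;
}

The call-site simplification mirrors the change to calculate_slab_order() in the diff below.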
Diffstat
-rw-r--r--	mm/slab.c	13
1 file changed, 8 insertions, 5 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 85e394f5918c..4f4e6472db5b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -460,9 +460,10 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 /*
  * Calculate the number of objects and left-over bytes for a given buffer size.
  */
-static void cache_estimate(unsigned long gfporder, size_t buffer_size,
-		unsigned long flags, size_t *left_over, unsigned int *num)
+static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
+		unsigned long flags, size_t *left_over)
 {
+	unsigned int num;
 	size_t slab_size = PAGE_SIZE << gfporder;
 
 	/*
@@ -483,13 +484,15 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 * correct alignment when allocated.
 	 */
 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
-		*num = slab_size / buffer_size;
+		num = slab_size / buffer_size;
 		*left_over = slab_size % buffer_size;
 	} else {
-		*num = slab_size / (buffer_size + sizeof(freelist_idx_t));
+		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 		*left_over = slab_size %
 			(buffer_size + sizeof(freelist_idx_t));
 	}
+
+	return num;
 }
 
 #if DEBUG
@@ -1893,7 +1896,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		unsigned int num;
 		size_t remainder;
 
-		cache_estimate(gfporder, size, flags, &remainder, &num);
+		num = cache_estimate(gfporder, size, flags, &remainder);
 		if (!num)
 			continue;