author    Pekka Enberg <penberg@cs.helsinki.fi>    2006-01-08 10:00:36 +0100
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-01-09 05:12:39 +0100
commit    4d268eba1187ef66844a6a33b9431e5d0dadd4ad (patch)
tree      575aa29016688a07b2a80132a15cc8b5a5027f60 /mm
parent    [PATCH] slab: extract slabinfo header printing to separate function (diff)
[PATCH] slab: extract slab order calculation to separate function
This patch moves the ugly loop that determines the 'optimal' size (page order) of cache slabs from kmem_cache_create() to a separate function and cleans it up a bit.

Thanks to Matthew Wilcox for the help with this patch.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 89
1 file changed, 49 insertions(+), 40 deletions(-)
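A note on the helper introduced in the diff below: it walks page orders upward from cachep->gfporder and stops at the first order whose leftover space is acceptably small. The stopping rules can be modelled outside the kernel. The following is a minimal user-space sketch, not kernel code: estimate() is a crude stand-in for cache_estimate() (it ignores slab-descriptor overhead, alignment, and coloring), and PAGE_SZ, MAX_ORDER_TOY, and BREAK_ORDER_TOY are made-up stand-ins for the kernel's PAGE_SIZE, MAX_GFP_ORDER, and slab_break_gfp_order.

/*
 * Toy user-space model of the calculate_slab_order() loop.
 * estimate() stands in for cache_estimate() and ignores the
 * slab-descriptor overhead the real function accounts for.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ         4096UL  /* stand-in for PAGE_SIZE */
#define MAX_ORDER_TOY   5       /* stand-in for MAX_GFP_ORDER */
#define BREAK_ORDER_TOY 1       /* stand-in for slab_break_gfp_order */

static void estimate(int order, size_t size, size_t *remainder,
                     unsigned int *num)
{
        size_t slab = PAGE_SZ << order;

        *num = slab / size;
        *remainder = slab - *num * size;
}

int main(void)
{
        size_t size = 700;      /* example object size in bytes */
        size_t left_over = 0;
        unsigned int objs = 0;
        int order;

        for (order = 0; ; order++) {
                unsigned int num;
                size_t remainder;

                if (order > MAX_ORDER_TOY)
                        break;
                estimate(order, size, &remainder, &num);
                if (!num)
                        continue;       /* object larger than this slab */
                objs = num;
                left_over = remainder;
                /* very large slabs are bad for the page allocator */
                if (order >= BREAK_ORDER_TOY)
                        break;
                /* accept once waste is at most 1/8th of the slab */
                if (left_over * 8 <= (PAGE_SZ << order))
                        break;
        }
        printf("order %d: %u objects, %zu bytes left over\n",
               order, objs, left_over);
        return 0;
}

Running this with a 700-byte object size reports order 1 with 11 objects and 492 bytes left over, mirroring how the kernel loop trades leftover space against page order.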
diff --git a/mm/slab.c b/mm/slab.c
index 3d3b5a46854f..2551b1eeadb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1474,6 +1474,53 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
}
/**
+ * calculate_slab_order - calculate size (page order) of slabs and the number
+ * of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+ size_t align, gfp_t flags)
+{
+ size_t left_over = 0;
+
+ for ( ; ; cachep->gfporder++) {
+ unsigned int num;
+ size_t remainder;
+
+ if (cachep->gfporder > MAX_GFP_ORDER) {
+ cachep->num = 0;
+ break;
+ }
+
+ cache_estimate(cachep->gfporder, size, align, flags,
+ &remainder, &num);
+ if (!num)
+ continue;
+ /* More than offslab_limit objects will cause problems */
+ if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+ break;
+
+ cachep->num = num;
+ left_over = remainder;
+
+ /*
+ * Large number of objects is good, but very large slabs are
+ * currently bad for the gfp()s.
+ */
+ if (cachep->gfporder >= slab_break_gfp_order)
+ break;
+
+ if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
+ /* Acceptable internal fragmentation */
+ break;
+ }
+ return left_over;
+}
+
+/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
@@ -1682,46 +1729,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->gfporder = 0;
cache_estimate(cachep->gfporder, size, align, flags,
&left_over, &cachep->num);
- } else {
- /*
- * Calculate size (in pages) of slabs, and the num of objs per
- * slab. This could be made much more intelligent. For now,
- * try to avoid using high page-orders for slabs. When the
- * gfp() funcs are more friendly towards high-order requests,
- * this should be changed.
- */
- do {
- unsigned int break_flag = 0;
-cal_wastage:
- cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- if (break_flag)
- break;
- if (cachep->gfporder >= MAX_GFP_ORDER)
- break;
- if (!cachep->num)
- goto next;
- if (flags & CFLGS_OFF_SLAB &&
- cachep->num > offslab_limit) {
- /* This num of objs will cause problems. */
- cachep->gfporder--;
- break_flag++;
- goto cal_wastage;
- }
-
- /*
- * Large num of objs is good, but v. large slabs are
- * currently bad for the gfp()s.
- */
- if (cachep->gfporder >= slab_break_gfp_order)
- break;
-
- if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
- break; /* Acceptable internal fragmentation. */
-next:
- cachep->gfporder++;
- } while (1);
- }
+ } else
+ left_over = calculate_slab_order(cachep, size, align, flags);
if (!cachep->num) {
printk("kmem_cache_create: couldn't create cache %s.\n", name);