path: root/mm/slub.c
author     David Rientjes <rientjes@google.com>    2009-07-07 09:14:14 +0200
committer  Pekka Enberg <penberg@cs.helsinki.fi>   2009-07-10 08:52:55 +0200
commit     fa5ec8a1f66f3c2a3af723abcf8085509c9ee682 (patch)
tree       be8e06f8ace38ed40ec2af4465dd1fffbc4b4f09 /mm/slub.c
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6 (diff)
slub: add option to disable higher order debugging slabs
When debugging is enabled, slub requires that additional metadata be stored in
slabs for certain options: SLAB_RED_ZONE, SLAB_POISON, and SLAB_STORE_USER.
Consequently, it may require that the minimum possible slab order needed to
allocate a single object be greater when using these options.  The most
notable example is for objects that are PAGE_SIZE bytes in size.

Higher minimum slab orders may cause page allocation failures when the system
is out of memory or under heavy fragmentation.

This patch adds a new slub_debug option, which disables debugging by default
for caches that would have resulted in higher minimum orders:

	slub_debug=O

When this option is used on systems with 4K pages, kmalloc-4096, for example,
will not have debugging enabled by default even if CONFIG_SLUB_DEBUG_ON is
defined, because debugging would have resulted in an order-1 minimum slab
order.

Reported-by: Larry Finger <Larry.Finger@lwfinger.net>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
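To make the order arithmetic concrete, here is a minimal user-space sketch of
the check the patch adds to kmem_cache_flags(); PAGE_SIZE is assumed to be
4096, struct track is only an approximation of SLUB's private tracking record,
and get_order() is re-implemented here purely for the example:

/*
 * Illustration only, not part of the patch: a user-space approximation of
 * the "would debugging raise the minimum slab order?" check.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL

/* Rough stand-in for SLUB's struct track (caller address, cpu, pid, time). */
struct track {
	unsigned long addr;
	int cpu;
	int pid;
	unsigned long when;
};

/* DEBUG_SIZE as defined in the hunk below: pointers plus two track records. */
#define DEBUG_SIZE	(3 * sizeof(void *) + 2 * sizeof(struct track))

/* Smallest page order whose span covers size bytes. */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long objsize = 4096;	/* e.g. kmalloc-4096 on a 4K-page system */

	printf("debug metadata: %zu bytes\n", DEBUG_SIZE);
	printf("min order without debugging: %d\n", get_order(objsize));
	printf("min order with debugging:    %d\n",
	       get_order(objsize + DEBUG_SIZE));

	/* This is the comparison slub_debug=O keys off of. */
	if (get_order(objsize + DEBUG_SIZE) > get_order(objsize))
		printf("debugging would raise the minimum slab order\n");
	return 0;
}

On a 64-bit build this reports a minimum order of 1 for the debugged
kmalloc-4096 cache versus 0 without debugging, which is exactly the case the
new option is meant to avoid.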
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  41
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a9201d83178b..466089cd5deb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -142,6 +142,13 @@
SLAB_POISON | SLAB_STORE_USER)
/*
+ * Debugging flags that require metadata to be stored in the slab, up to
+ * DEBUG_SIZE in size.
+ */
+#define DEBUG_SIZE_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#define DEBUG_SIZE (3 * sizeof(void *) + 2 * sizeof(struct track))
+
+/*
* Set of flags that will prevent slab merging
*/
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
@@ -326,6 +333,7 @@ static int slub_debug;
#endif
static char *slub_debug_slabs;
+static int disable_higher_order_debug;
/*
* Object debugging
@@ -977,6 +985,15 @@ static int __init setup_slub_debug(char *str)
*/
goto check_slabs;
+ if (tolower(*str) == 'o') {
+ /*
+ * Avoid enabling debugging on caches if its minimum order
+ * would increase as a result.
+ */
+ disable_higher_order_debug = 1;
+ goto out;
+ }
+
slub_debug = 0;
if (*str == '-')
/*
@@ -1023,13 +1040,27 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
+ int debug_flags = slub_debug;
+
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
- flags |= slub_debug;
+ if (debug_flags) {
+ if (slub_debug_slabs &&
+ strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))
+ goto out;
+
+ /*
+ * Disable debugging that increases slab size if the minimum
+ * slab order would have increased as a result.
+ */
+ if (disable_higher_order_debug &&
+ get_order(objsize + DEBUG_SIZE) > get_order(objsize))
+ debug_flags &= ~DEBUG_SIZE_FLAGS;
+ flags |= debug_flags;
+ }
+out:
return flags;
}
#else
@@ -1561,6 +1592,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
"default order: %d, min order: %d\n", s->name, s->objsize,
s->size, oo_order(s->oo), oo_order(s->min));
+ if (oo_order(s->min) > get_order(s->objsize))
+ printk(KERN_WARNING " %s debugging increased min order, use "
+ "slub_debug=O to disable.\n", s->name);
+
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long nr_slabs;
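
Usage, per the commit message above: booting with

	slub_debug=O

keeps the CONFIG_SLUB_DEBUG_ON defaults for ordinary caches but leaves caches
such as kmalloc-4096 undebugged so that their minimum slab order does not
grow; the tolower() check added to setup_slub_debug() above means a lowercase
'o' is accepted as well.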