author | Ingo Molnar <mingo@elte.hu> | 2009-02-20 12:15:30 +0100 |
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-20 12:15:30 +0100 |
commit | 057685cf57066bc8aaed68de1b1970e12f0075d2 (patch) |
tree | e7fea68e7080d6366b45775028bfbdf6237ecda5 |
parent | Merge branches 'tracing/function-graph-tracer' and 'linus' into tracing/core (diff) |
parent | SLUB: Introduce and use SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants (diff) |
download | linux-057685cf57066bc8aaed68de1b1970e12f0075d2.tar.xz linux-057685cf57066bc8aaed68de1b1970e12f0075d2.zip |
Merge branch 'for-ingo' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6 into tracing/kmemtrace
Conflicts:
	mm/slub.c
-rw-r--r-- | include/linux/slub_def.h | 19 |
-rw-r--r-- | mm/slub.c | 16 |
2 files changed, 24 insertions, 11 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6b657f7dcb2b..9e3a575b2c30 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -122,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -231,7 +244,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -275,7 +288,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
diff --git a/mm/slub.c b/mm/slub.c
index 3525e7b21d19..6de5e07c8850 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE)) {
+	if (unlikely(size > SLUB_MAX_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
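The comment added in slub_def.h states the policy; the arithmetic behind the two new constants can also be checked in isolation. The following is a userspace sketch, not kernel code: PAGE_SHIFT is hard-coded to the common 4K-page value of 12, and cache_index() is a simplified stand-in for the kernel's kmalloc_index()/get_slab() lookup that ignores the special 96- and 192-byte caches. It illustrates the two facts the patch encodes: requests above SLUB_MAX_SIZE bypass SLUB for the page allocator, and the largest cache index a request served by SLUB can produce is PAGE_SHIFT, so kmalloc_caches[] needs exactly SLUB_PAGE_SHIFT (PAGE_SHIFT + 1) entries.

```c
#include <stdio.h>

#define PAGE_SHIFT        12                   /* assumed: common 4K-page config */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE     (PAGE_SIZE)          /* largest size SLUB itself serves */
#define SLUB_PAGE_SHIFT   (PAGE_SHIFT + 1)     /* kmalloc_caches[] entry count */
#define KMALLOC_SHIFT_LOW 3                    /* smallest cache: 2^3 = 8 bytes */

/*
 * Simplified stand-in for the kernel's kmalloc_index()/get_slab():
 * round up to the next power of two and return its log2. The real
 * lookup additionally special-cases the 96- and 192-byte caches.
 */
static int cache_index(size_t size)
{
	int i = KMALLOC_SHIFT_LOW;

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	size_t sizes[] = { 8, 64, 4096, 4097, 8192 };
	unsigned int n;

	for (n = 0; n < sizeof(sizes) / sizeof(sizes[0]); n++) {
		if (sizes[n] > SLUB_MAX_SIZE) {
			/* SLUB punts to the page allocator (kmalloc_large()) */
			printf("%5zu bytes -> page allocator\n", sizes[n]);
		} else {
			int i = cache_index(sizes[n]);

			/* i can reach PAGE_SHIFT, hence PAGE_SHIFT + 1 entries */
			printf("%5zu bytes -> kmalloc_caches[%d] (%lu-byte objects)\n",
			       sizes[n], i, 1UL << i);
		}
	}
	return 0;
}
```

Run against the sample sizes, the index tops out at 12 with an array length of 13, which is why the init loops change from `i <= PAGE_SHIFT` to the equivalent but clearer array-bound form `i < SLUB_PAGE_SHIFT`.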