author | Christoph Lameter <clameter@sgi.com> | 2007-07-17 13:03:22 +0200
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 19:23:01 +0200
commit | 6cb8f91320d3e720351c21741da795fed580b21b |
tree | c9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d |
parent | Slab allocators: consolidate code for krealloc in mm/util.c |
download | linux-6cb8f91320d3e720351c21741da795fed580b21b.tar.xz, linux-6cb8f91320d3e720351c21741da795fed580b21b.zip |
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define a ZERO_OR_NULL_PTR macro so that the individual NULL and ZERO_SIZE_PTR checks can be removed from the allocators, and move the ZERO_SIZE_PTR definitions from slub_def.h into the common slab.h.
Make ZERO_SIZE_PTR work for all slab allocators and get rid of the WARN_ON_ONCE(size == 0) that still remained in SLAB.
Make SLUB return NULL, like the other allocators, when a too-large allocation is requested via __kmalloc.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
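
The cleanup rests on a single trick: ZERO_SIZE_PTR is the small constant 16, so one unsigned comparison (`x <= 16`) is true exactly for NULL (0), ZERO_SIZE_PTR (16), and the in-between values that can never be valid object pointers. Below is a minimal userspace sketch of the two macros the patch adds to slab.h; the macro bodies mirror the patch, while main() is an invented harness for illustration only:

```c
/*
 * Userspace sketch of the two macros this patch moves into
 * <linux/slab.h>. main() is an invented harness, not part of
 * the commit.
 */
#include <stdio.h>
#include <stddef.h>

/* Returned for zero sized kmalloc requests; dereferencing it
 * faults at an address distinct from a NULL dereference. */
#define ZERO_SIZE_PTR ((void *)16)

/* One unsigned comparison covers NULL (0), ZERO_SIZE_PTR (16),
 * and everything in between. */
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	int obj;

	printf("NULL:          %d\n", ZERO_OR_NULL_PTR(NULL));          /* 1 */
	printf("ZERO_SIZE_PTR: %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
	printf("&obj:          %d\n", ZERO_OR_NULL_PTR(&obj));          /* 0 */
	return 0;
}
```

That single test is what lets kfree() and ksize() in every allocator replace their bare NULL checks with ZERO_OR_NULL_PTR() without adding a second branch.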
-rw-r--r-- | include/linux/slab.h | 13
-rw-r--r-- | include/linux/slab_def.h | 12
-rw-r--r-- | include/linux/slub_def.h | 12
-rw-r--r-- | mm/slab.c | 13
-rw-r--r-- | mm/slob.c | 11
-rw-r--r-- | mm/slub.c | 29
-rw-r--r-- | mm/util.c | 2

7 files changed, 57 insertions(+), 35 deletions(-)
```diff
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b79..0289ec89300a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
 #define SLAB_TRACE	0x00200000UL	/* Trace allocations and frees */
 
 /*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+				(unsigned long)ZERO_SIZE_PTR)
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454a..16e814ffab8d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -58,6 +62,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -88,6 +96,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f6771525..579b0a22858e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -159,18 +159,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
diff --git a/mm/slab.c b/mm/slab.c
index 4bd8a53091b7..d2cd304fd8af 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		 * this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
 
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index 154e7bdf3544..41d32c3c0be4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 	BUG_ON(!size);
 
@@ -424,10 +424,13 @@ out:
 
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
@@ -450,7 +453,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -468,7 +471,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index 1b0a95d75dbb..548d78df81e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
diff --git a/mm/util.c b/mm/util.c
index 18396ea63ee6..f2f21b775516 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -76,7 +76,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
```
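
Taken together, the changes give every kmalloc-family entry point one consistent contract: a zero-size request yields ZERO_SIZE_PTR, an impossibly large request yields NULL, and both values are safe to hand to kfree() and ksize(). The following hypothetical demo module (zsp_demo_* and all of its boilerplate are invented for illustration; nothing like it is part of the commit) sketches that contract from a caller's point of view:

```c
/*
 * Hypothetical demonstration module, not part of this commit:
 * exercises the post-patch semantics of the kmalloc family.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init zsp_demo_init(void)
{
	void *p = kmalloc(0, GFP_KERNEL);   /* now ZERO_SIZE_PTR, not NULL */
	void *q = kmalloc(32, GFP_KERNEL);  /* a real allocation */

	WARN_ON(p != ZERO_SIZE_PTR);        /* zero size -> ZERO_SIZE_PTR */
	WARN_ON(ZERO_OR_NULL_PTR(q));       /* a real pointer fails the test
					       (assuming the allocation succeeded) */

	q = krealloc(q, 0, GFP_KERNEL);     /* frees q, returns ZERO_SIZE_PTR */
	WARN_ON(q != ZERO_SIZE_PTR);

	kfree(p);                           /* no-op, exactly like kfree(NULL) */
	kfree(q);                           /* no-op */
	return 0;
}

static void __exit zsp_demo_exit(void)
{
}

module_init(zsp_demo_init);
module_exit(zsp_demo_exit);
MODULE_LICENSE("GPL");
```

Note that ZERO_OR_NULL_PTR() deliberately does not distinguish the two degenerate values: a caller that must tell "out of memory" apart from "asked for nothing" has to compare against NULL or ZERO_SIZE_PTR explicitly, which is exactly how SLUB's get_slab() now signals "allocation too large" (NULL) versus "zero size" (ZERO_SIZE_PTR) to its callers.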