author	Pekka Enberg <penberg@kernel.org>	2012-10-03 08:56:37 +0200
committer	Pekka Enberg <penberg@kernel.org>	2012-10-03 08:56:37 +0200
commit	f4178cdddd4cb860a17f363fe13264fff03da7f2 (patch)
tree	5ca8dc6bb09bcb2c4b959b60712d7a3f60c7a43f /mm/slob.c
parent	Merge branch 'slab/next' into slab/for-linus (diff)
parent	slab: Only define slab_error for DEBUG (diff)
Merge branch 'slab/common-for-cgroups' into slab/for-linus
Fix up a trivial conflict with NUMA_NO_NODE cleanups.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
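The branch being merged moves struct kmem_cache allocation out of the
individual slab allocators and into common code: SLOB's __kmem_cache_create()
in the diff below no longer allocates the cache itself, it only initializes a
structure the common layer hands it and reports success or failure. For
orientation, the common-layer call sequence looks roughly like the sketch
below; the locking, helper names, and error handling are illustrative
assumptions, not the verbatim mm/slab_common.c of this merge.

/*
 * Sketch of the common-layer kmem_cache_create() this merge enables.
 * Illustrative only: the slab_mutex/slab_caches usage and the error
 * handling are assumptions for this sketch.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	/* the boot cache (kmem_cache_boot, added below) backs this allocation */
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->name = name;
		s->size = size;
		s->align = align;
		s->ctor = ctor;
		/* allocator-specific setup; SLOB's version is in the diff below */
		if (__kmem_cache_create(s, flags)) {
			kmem_cache_free(kmem_cache, s);
			s = NULL;
		} else {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
		}
	}
	mutex_unlock(&slab_mutex);
	return s;
}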
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	60
1 file changed, 27 insertions(+), 33 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index dd47d16d57b6..f3a5ced392d7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -529,44 +529,24 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	struct kmem_cache *c;
+	size_t align = c->size;
 
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
-
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
-			c->align = ARCH_SLAB_MINALIGN;
-		if (c->align < align)
-			c->align = align;
-
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-		c->refcount = 1;
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
 	}
-	return c;
-}
+	c->flags = flags;
+	/* ignore alignment unless it's forced */
+	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+	if (c->align < ARCH_SLAB_MINALIGN)
+		c->align = ARCH_SLAB_MINALIGN;
+	if (c->align < align)
+		c->align = align;
 
-void kmem_cache_destroy(struct kmem_cache *c)
-{
-	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
-	slob_free(c, sizeof(struct kmem_cache));
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -634,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+	.name = "kmem_cache",
+	.size = sizeof(struct kmem_cache),
+	.flags = SLAB_PANIC,
+	.align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+	kmem_cache = &kmem_cache_boot;
 	slab_state = UP;
 }
 
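The destroy side moves in the same direction: kmem_cache_destroy() vanishes
from mm/slob.c above because common code now owns it, calling the new
__kmem_cache_shutdown() hook (which SLOB implements as an unconditional
success, since it cannot track remaining objects) before freeing the struct
kmem_cache it allocated. A sketch of that flow, again with the caveat that
the locking, refcount, and list handling shown are assumptions:

/*
 * Sketch of the common-layer kmem_cache_destroy() implied by this merge.
 * Illustrative only; the refcount and list details are assumptions.
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	mutex_lock(&slab_mutex);
	if (--s->refcount) {
		mutex_unlock(&slab_mutex);
		return;			/* cache still has other users */
	}
	list_del(&s->list);
	if (__kmem_cache_shutdown(s)) {
		/* allocator reports live objects; SLOB never does */
		list_add(&s->list, &slab_caches);
		mutex_unlock(&slab_mutex);
		return;
	}
	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();		/* wait out RCU-deferred frees */
	kmem_cache_free(kmem_cache, s);
}

Note how this keeps SLOB's old rcu_barrier() behaviour, while the statically
defined kmem_cache_boot lets the very first caches be allocated before any
dynamic allocator state exists: kmem_cache_init() only has to point
kmem_cache at it and flip slab_state to UP.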