author     Vladimir Davydov <vdavydov@parallels.com>  2014-04-08 00:39:26 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 01:36:12 +0200
commit     794b1248be4e7e157f5535c3ee49168aa4643349 (patch)
tree       93bfa09b76abff9ec2f9d154bb43971bcce2f574
parent     memcg, slab: cleanup memcg cache creation (diff)
memcg, slab: separate memcg vs root cache creation paths
Memcg-awareness turned kmem_cache_create() into a dirty interweaving of memcg-only and except-for-memcg calls. To clean this up, let's move the code responsible for memcg cache creation to a separate function.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/memcontrol.h |   6
-rw-r--r--  include/linux/slab.h       |   6
-rw-r--r--  mm/memcontrol.c            |   7
-rw-r--r--  mm/slab_common.c           | 187
4 files changed, 111 insertions(+), 95 deletions(-)
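
In short, the patch splits one overloaded entry point into two, with both paths funneling into the new common helper do_kmem_cache_create(). A minimal sketch of the resulting call structure, condensed from the hunks below (locking, sanity checks, and error handling trimmed; not a literal excerpt):

	/* Root caches: the public API keeps its old signature. */
	struct kmem_cache *
	kmem_cache_create(const char *name, size_t size, size_t align,
			  unsigned long flags, void (*ctor)(void *))
	{
		/* ... sanity check, __kmem_cache_alias() lookup ... */
		return do_kmem_cache_create(kstrdup(name, GFP_KERNEL),
					    size, size,
					    calculate_alignment(flags, align, size),
					    flags, ctor,
					    NULL, NULL); /* no memcg, no root cache */
	}

	/* Memcg caches: everything is inherited from the parent (root) cache. */
	void kmem_cache_create_memcg(struct mem_cgroup *memcg,
				     struct kmem_cache *root_cache)
	{
		struct kmem_cache *s;

		s = do_kmem_cache_create(memcg_create_cache_name(memcg, root_cache),
					 root_cache->object_size, root_cache->size,
					 root_cache->align, root_cache->flags,
					 root_cache->ctor, memcg, root_cache);
		if (!IS_ERR(s))
			s->allocflags |= __GFP_KMEMCG;
	}

Note the asymmetry: kmem_cache_create() still returns a pointer (and honors SLAB_PANIC), while kmem_cache_create_memcg() returns void and silently gives up on failure, since per-memcg caches are created asynchronously on first allocation and the memcg simply keeps falling back to the root cache until a later attempt succeeds.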
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab7f02884983..02d3072841e9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -638,12 +638,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
-static inline char *memcg_create_cache_name(struct mem_cgroup *memcg,
- struct kmem_cache *root_cache)
-{
- return NULL;
-}
-
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
struct kmem_cache *s, struct kmem_cache *root_cache)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5b2df60299e..3dd389aa91c7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -115,9 +115,9 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
-struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
- unsigned long, void (*)(void *), struct kmem_cache *);
+#ifdef CONFIG_MEMCG_KMEM
+void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
+#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32c7342df4bf..451523c3bd4e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3395,13 +3395,8 @@ static void memcg_create_cache_work_func(struct work_struct *w)
struct create_work *cw = container_of(w, struct create_work, work);
struct mem_cgroup *memcg = cw->memcg;
struct kmem_cache *cachep = cw->cachep;
- struct kmem_cache *new;
- new = kmem_cache_create_memcg(memcg, cachep->name,
- cachep->object_size, cachep->align,
- cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
- if (new)
- new->allocflags |= __GFP_KMEMCG;
+ kmem_cache_create_memcg(memcg, cachep);
css_put(&memcg->css);
kfree(cw);
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 11857abf7057..ccc012f00126 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -29,8 +29,7 @@ DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
- size_t size)
+static int kmem_cache_sanity_check(const char *name, size_t size)
{
struct kmem_cache *s = NULL;
@@ -57,13 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
}
#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
- /*
- * For simplicity, we won't check this in the list of memcg
- * caches. We have control over memcg naming, and if there
- * aren't duplicates in the global list, there won't be any
- * duplicates in the memcg lists as well.
- */
- if (!memcg && !strcmp(s->name, name)) {
+ if (!strcmp(s->name, name)) {
pr_err("%s (%s): Cache name already exists.\n",
__func__, name);
dump_stack();
@@ -77,8 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
return 0;
}
#else
-static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
- const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
return 0;
}
@@ -139,6 +131,46 @@ unsigned long calculate_alignment(unsigned long flags,
return ALIGN(align, sizeof(void *));
}
+static struct kmem_cache *
+do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *),
+ struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+ struct kmem_cache *s;
+ int err;
+
+ err = -ENOMEM;
+ s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+ if (!s)
+ goto out;
+
+ s->name = name;
+ s->object_size = object_size;
+ s->size = size;
+ s->align = align;
+ s->ctor = ctor;
+
+ err = memcg_alloc_cache_params(memcg, s, root_cache);
+ if (err)
+ goto out_free_cache;
+
+ err = __kmem_cache_create(s, flags);
+ if (err)
+ goto out_free_cache;
+
+ s->refcount = 1;
+ list_add(&s->list, &slab_caches);
+ memcg_register_cache(s);
+out:
+ if (err)
+ return ERR_PTR(err);
+ return s;
+
+out_free_cache:
+ memcg_free_cache_params(s);
+ kfree(s);
+ goto out;
+}
/*
* kmem_cache_create - Create a cache.
@@ -164,34 +196,21 @@ unsigned long calculate_alignment(unsigned long flags,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-
struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *),
- struct kmem_cache *parent_cache)
+kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
{
- struct kmem_cache *s = NULL;
+ struct kmem_cache *s;
+ char *cache_name;
int err;
get_online_cpus();
mutex_lock(&slab_mutex);
- err = kmem_cache_sanity_check(memcg, name, size);
+ err = kmem_cache_sanity_check(name, size);
if (err)
goto out_unlock;
- if (memcg) {
- /*
- * Since per-memcg caches are created asynchronously on first
- * allocation (see memcg_kmem_get_cache()), several threads can
- * try to create the same cache, but only one of them may
- * succeed. Therefore if we get here and see the cache has
- * already been created, we silently return NULL.
- */
- if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
- goto out_unlock;
- }
-
/*
* Some allocators will constraint the set of valid flags to a subset
* of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -200,55 +219,29 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
*/
flags &= CACHE_CREATE_MASK;
- if (!memcg) {
- s = __kmem_cache_alias(name, size, align, flags, ctor);
- if (s)
- goto out_unlock;
- }
-
- err = -ENOMEM;
- s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
- if (!s)
+ s = __kmem_cache_alias(name, size, align, flags, ctor);
+ if (s)
goto out_unlock;
- s->object_size = s->size = size;
- s->align = calculate_alignment(flags, align, size);
- s->ctor = ctor;
-
- if (memcg)
- s->name = memcg_create_cache_name(memcg, parent_cache);
- else
- s->name = kstrdup(name, GFP_KERNEL);
- if (!s->name)
- goto out_free_cache;
-
- err = memcg_alloc_cache_params(memcg, s, parent_cache);
- if (err)
- goto out_free_cache;
-
- err = __kmem_cache_create(s, flags);
- if (err)
- goto out_free_cache;
+ cache_name = kstrdup(name, GFP_KERNEL);
+ if (!cache_name) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
- s->refcount = 1;
- list_add(&s->list, &slab_caches);
- memcg_register_cache(s);
+ s = do_kmem_cache_create(cache_name, size, size,
+ calculate_alignment(flags, align, size),
+ flags, ctor, NULL, NULL);
+ if (IS_ERR(s)) {
+ err = PTR_ERR(s);
+ kfree(cache_name);
+ }
out_unlock:
mutex_unlock(&slab_mutex);
put_online_cpus();
if (err) {
- /*
- * There is no point in flooding logs with warnings or
- * especially crashing the system if we fail to create a cache
- * for a memcg. In this case we will be accounting the memcg
- * allocation to the root cgroup until we succeed to create its
- * own cache, but it isn't that critical.
- */
- if (!memcg)
- return NULL;
-
if (flags & SLAB_PANIC)
panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
name, err);
@@ -260,21 +253,55 @@ out_unlock:
return NULL;
}
return s;
-
-out_free_cache:
- memcg_free_cache_params(s);
- kfree(s->name);
- kmem_cache_free(kmem_cache, s);
- goto out_unlock;
}
+EXPORT_SYMBOL(kmem_cache_create);
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * kmem_cache_create_memcg - Create a cache for a memory cgroup.
+ * @memcg: The memory cgroup the new cache is for.
+ * @root_cache: The parent of the new cache.
+ *
+ * This function attempts to create a kmem cache that will serve allocation
+ * requests going from @memcg to @root_cache. The new cache inherits properties
+ * from its parent.
+ */
+void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
- return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+ struct kmem_cache *s;
+ char *cache_name;
+
+ get_online_cpus();
+ mutex_lock(&slab_mutex);
+
+ /*
+ * Since per-memcg caches are created asynchronously on first
+ * allocation (see memcg_kmem_get_cache()), several threads can try to
+ * create the same cache, but only one of them may succeed.
+ */
+ if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
+ goto out_unlock;
+
+ cache_name = memcg_create_cache_name(memcg, root_cache);
+ if (!cache_name)
+ goto out_unlock;
+
+ s = do_kmem_cache_create(cache_name, root_cache->object_size,
+ root_cache->size, root_cache->align,
+ root_cache->flags, root_cache->ctor,
+ memcg, root_cache);
+ if (IS_ERR(s)) {
+ kfree(cache_name);
+ goto out_unlock;
+ }
+
+ s->allocflags |= __GFP_KMEMCG;
+
+out_unlock:
+ mutex_unlock(&slab_mutex);
+ put_online_cpus();
}
-EXPORT_SYMBOL(kmem_cache_create);
+#endif /* CONFIG_MEMCG_KMEM */
void kmem_cache_destroy(struct kmem_cache *s)
{