author    | Vladimir Davydov <vdavydov@parallels.com> | 2014-01-24 00:52:58 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-24 01:36:51 +0100
commit    | 1aa13254259bdef0bca723849ab3bab308d2f0c3 (patch)
tree      | 4d2207788bf99f908ffed696f9d58c96eaf0b8b0 /mm/memcontrol.c
parent    | memcg, slab: kmem_cache_create_memcg(): fix memleak on fail path (diff)
memcg, slab: clean up memcg cache initialization/destruction
Currently we have a rather messy set of functions for per-memcg
kmem cache initialization and destruction.
Per-memcg caches are created in memcg_create_kmem_cache(). This
function calls kmem_cache_create_memcg() to allocate and initialize a
kmem cache and then "registers" the new cache in the
memcg_params::memcg_caches array of the parent cache.
During its workflow, kmem_cache_create_memcg() executes the following
memcg-related functions (a condensed sketch of the whole pre-patch
creation path follows the list):
- memcg_alloc_cache_params(), to initialize memcg_params of the newly
created cache;
- memcg_cache_list_add(), to add the new cache to the memcg_slab_caches
list.
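
For orientation, the pre-patch creation path boils down to roughly the
following (a condensed sketch of the old memcg_create_kmem_cache(), pieced
together from the removed hunks in the diff below; the css reference
juggling and the nr_pages counter setup are trimmed):

	static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
							  struct kmem_cache *cachep)
	{
		struct kmem_cache *new_cachep;
		int idx = memcg_cache_id(memcg);

		mutex_lock(&memcg_cache_mutex);
		new_cachep = cache_from_memcg_idx(cachep, idx);
		if (new_cachep)
			goto out;

		/* allocates the cache; on the way, kmem_cache_create_memcg()
		 * runs memcg_alloc_cache_params() and memcg_cache_list_add() */
		new_cachep = kmem_cache_dup(memcg, cachep);
		if (new_cachep == NULL) {
			new_cachep = cachep;
			goto out;
		}

		/* ...while the "registration" in the parent happens here */
		cachep->memcg_params->memcg_caches[idx] = new_cachep;
		wmb();	/* publish the pointer to lockless readers */
	out:
		mutex_unlock(&memcg_cache_mutex);
		return new_cachep;
	}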
On the other hand, kmem_cache_destroy(), called on cache destruction,
only calls memcg_release_cache(), which does all the work: it clears the
reference to the cache in its parent's memcg_params::memcg_caches,
removes the cache from the memcg_slab_caches list, and frees
memcg_params.
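
For comparison, the pre-patch teardown is concentrated in that single
function; condensed, memcg_release_cache() amounts to the following
(reconstructed from the description above and the old lines in the diff
below, so take the exact ordering as approximate):

	void memcg_release_cache(struct kmem_cache *s)
	{
		struct mem_cgroup *memcg;
		int id;

		/* happens, e.g., when a root cache goes away before any memcg is added */
		if (!s->memcg_params)
			return;
		if (s->memcg_params->is_root_cache)
			goto out;

		memcg = s->memcg_params->memcg;
		id = memcg_cache_id(memcg);

		/* clear the parent's pointer to this per-memcg cache */
		s->memcg_params->root_cache->memcg_params->memcg_caches[id] = NULL;

		/* take the cache off the memcg's list */
		mutex_lock(&memcg->slab_caches_mutex);
		list_del(&s->memcg_params->list);
		mutex_unlock(&memcg->slab_caches_mutex);

		css_put(&memcg->css);
	out:
		memcg_free_cache_params(s);
	}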
Such an inconsistency between the destruction and initialization paths
makes the code difficult to read, so let's clean this up a bit.
This patch moves all the code relating to registration of per-memcg
caches (adding to memcg list, setting the pointer to a cache from its
parent) to the newly created memcg_register_cache() and
memcg_unregister_cache() functions, making the initialization and
destruction paths look symmetrical.
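
The new memcg_register_cache() can be read in full in the diff below; most
of memcg_unregister_cache()'s body falls between the hunks, so here is a
sketch of what it ends up doing (an approximation assembled from the old
memcg_release_cache() described above, minus the memcg_params freeing):

	void memcg_unregister_cache(struct kmem_cache *s)
	{
		struct kmem_cache *root;
		struct mem_cgroup *memcg;
		int id;

		if (is_root_cache(s))
			return;

		memcg = s->memcg_params->memcg;
		id = memcg_cache_id(memcg);
		root = s->memcg_params->root_cache;

		/* undo the registration in reverse: drop the parent's pointer... */
		root->memcg_params->memcg_caches[id] = NULL;

		/* ...take the cache off the memcg's list... */
		mutex_lock(&memcg->slab_caches_mutex);
		list_del(&s->memcg_params->list);
		mutex_unlock(&memcg->slab_caches_mutex);

		/* ...and drop the css reference taken by memcg_register_cache() */
		css_put(&memcg->css);
	}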
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 64
1 file changed, 30 insertions(+), 34 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b8ebe71f872d..739383cd3f70 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 	css_put(&memcg->css);
 }
 
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
-{
-	if (!memcg)
-		return;
-
-	mutex_lock(&memcg->slab_caches_mutex);
-	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
-	mutex_unlock(&memcg->slab_caches_mutex);
-}
-
 /*
  * helper for acessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s)
 	kfree(s->memcg_params);
 }
 
-void memcg_release_cache(struct kmem_cache *s)
+void memcg_register_cache(struct kmem_cache *s)
 {
 	struct kmem_cache *root;
 	struct mem_cgroup *memcg;
 	int id;
 
+	if (is_root_cache(s))
+		return;
+
+	root = s->memcg_params->root_cache;
+	memcg = s->memcg_params->memcg;
+	id = memcg_cache_id(memcg);
+
+	css_get(&memcg->css);
+
+	mutex_lock(&memcg->slab_caches_mutex);
+	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
+	mutex_unlock(&memcg->slab_caches_mutex);
+
+	root->memcg_params->memcg_caches[id] = s;
 	/*
-	 * This happens, for instance, when a root cache goes away before we
-	 * add any memcg.
+	 * the readers won't lock, make sure everybody sees the updated value,
+	 * so they won't put stuff in the queue again for no reason
 	 */
-	if (!s->memcg_params)
-		return;
+	wmb();
+}
 
-	if (s->memcg_params->is_root_cache)
-		goto out;
+void memcg_unregister_cache(struct kmem_cache *s)
+{
+	struct kmem_cache *root;
+	struct mem_cgroup *memcg;
+	int id;
+
+	if (is_root_cache(s))
+		return;
 
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s)
 	mutex_unlock(&memcg->slab_caches_mutex);
 
 	css_put(&memcg->css);
-out:
-	memcg_free_cache_params(s);
 }
 
 /*
@@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	mutex_lock(&memcg_cache_mutex);
 	new_cachep = cache_from_memcg_idx(cachep, idx);
-	if (new_cachep) {
-		css_put(&memcg->css);
+	if (new_cachep)
 		goto out;
-	}
 
 	new_cachep = kmem_cache_dup(memcg, cachep);
-	if (new_cachep == NULL) {
+	if (new_cachep == NULL)
 		new_cachep = cachep;
-		css_put(&memcg->css);
-		goto out;
-	}
-
-	atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
-	cachep->memcg_params->memcg_caches[idx] = new_cachep;
-	/*
-	 * the readers won't lock, make sure everybody sees the updated value,
-	 * so they won't put stuff in the queue again for no reason
-	 */
-	wmb();
 out:
 	mutex_unlock(&memcg_cache_mutex);
 	return new_cachep;
@@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 	cw = container_of(w, struct create_work, work);
 	memcg_create_kmem_cache(cw->memcg, cw->cachep);
+	css_put(&cw->memcg->css);
 	kfree(cw);
 }