author | Muchun Song <songmuchun@bytedance.com> | 2022-03-22 22:40:56 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-03-22 23:57:03 +0100 |
commit | 88f2ef73fd66491a2f9a82373d22ca6540f23c62 | |
tree | 01f07916f6ba0c16f768702ab8176c07c7b9d6c1 /mm/slab.h | |
parent | mm: list_lru: transpose the array of per-node per-memcg lru lists | |
mm: introduce kmem_cache_alloc_lru
We currently allocate scope for every memcg to be able to be tracked on
every superblock instantiated in the system, regardless of whether that
superblock is even accessible to that memcg.
These huge memcg counts come from container hosts, where memcgs are
confined to just a small subset of the total number of superblocks
instantiated at any given point in time.
For these systems with huge container counts, list_lru does not need the
capability of tracking every memcg on every superblock. What it comes
down to is adding the memcg to the list_lru at the first insert.
So introduce kmem_cache_alloc_lru to allocate objects along with their
list_lru. In later patches, we will convert all inode and dentry
allocations from kmem_cache_alloc to kmem_cache_alloc_lru.
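
For illustration, a minimal sketch of the intended caller-side conversion. The `foo_*` identifiers below are hypothetical placeholders, not from this patch; the real inode and dentry call sites are converted in later patches of this series:

```c
/*
 * Sketch only: foo_cache and foo_lru stand in for a slab cache and the
 * list_lru that its objects are later added to.
 */
#include <linux/slab.h>
#include <linux/list_lru.h>

struct foo {
	struct list_head lru;	/* linked onto foo_lru at first insert */
};

static struct kmem_cache *foo_cache;
static struct list_lru foo_lru;

static struct foo *foo_alloc(void)
{
	/*
	 * Before: kmem_cache_alloc(foo_cache, GFP_KERNEL);
	 *
	 * After: also pass the list_lru the object will end up on, so
	 * the per-memcg lru lists for the allocating memcg are set up
	 * lazily on the first allocation rather than for every memcg
	 * on every superblock up front.
	 */
	return kmem_cache_alloc_lru(foo_cache, &foo_lru, GFP_KERNEL);
}
```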
Link: https://lkml.kernel.org/r/20220228122126.37293-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alex Shi <alexs@kernel.org>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kari Argillander <kari.argillander@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.h')
-rw-r--r-- | mm/slab.h | 25 |
1 files changed, 21 insertions, 4 deletions
diff --git a/mm/slab.h b/mm/slab.h
index c7f2abc2b154..fd7ae2024897 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -231,6 +231,7 @@ struct kmem_cache {
 #include <linux/kmemleak.h>
 #include <linux/random.h>
 #include <linux/sched/mm.h>
+#include <linux/list_lru.h>
 
 /*
  * State of the slab allocator.
@@ -472,6 +473,7 @@ static inline size_t obj_full_size(struct kmem_cache *s)
  * Returns false if the allocation should fail.
  */
 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct list_lru *lru,
 					     struct obj_cgroup **objcgp,
 					     size_t objects, gfp_t flags)
 {
@@ -487,13 +489,26 @@ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (!objcg)
 		return true;
 
-	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
-		obj_cgroup_put(objcg);
-		return false;
+	if (lru) {
+		int ret;
+		struct mem_cgroup *memcg;
+
+		memcg = get_mem_cgroup_from_objcg(objcg);
+		ret = memcg_list_lru_alloc(memcg, lru, flags);
+		css_put(&memcg->css);
+
+		if (ret)
+			goto out;
 	}
 
+	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
+		goto out;
+
 	*objcgp = objcg;
 	return true;
+out:
+	obj_cgroup_put(objcg);
+	return false;
 }
 
 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -598,6 +613,7 @@ static inline void memcg_free_slab_cgroups(struct slab *slab)
 }
 
 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct list_lru *lru,
 					     struct obj_cgroup **objcgp,
 					     size_t objects, gfp_t flags)
 {
@@ -697,6 +713,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+						     struct list_lru *lru,
 						     struct obj_cgroup **objcgp,
 						     size_t size, gfp_t flags)
 {
@@ -707,7 +724,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	if (should_failslab(s, flags))
 		return NULL;
 
-	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
 		return NULL;
 
 	return s;