author    | Vlastimil Babka <vbabka@suse.cz>          | 2024-03-26 11:37:39 +0100
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-04-26 05:56:16 +0200
commit    | e6100a4590bfc0a91c8e882f73538c9c0d987a46 (patch)
tree      | d7bb8b8272e4a4fc870de1edae65f7de2010fe11 /mm/slub.c
parent    | mm, slab: move memcg charging to post-alloc hook (diff)
download  | linux-e6100a4590bfc0a91c8e882f73538c9c0d987a46.tar.xz
          | linux-e6100a4590bfc0a91c8e882f73538c9c0d987a46.zip
mm, slab: move slab_memcg hooks to mm/memcontrol.c
The hooks make multiple calls to functions in mm/memcontrol.c, including
to current_obj_cgroup(), which is marked __always_inline. It might be
faster to make a single call to the hook in mm/memcontrol.c instead. The
hooks also use almost nothing from mm/slub.c. obj_full_size() can move to
mm/memcontrol.c along with the hooks, and cache_vmstat_idx() to the
internal mm/slab.h.
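For orientation, a sketch of where the two helpers can land after the
move; the bodies are copied from the mm/slub.c definitions removed in the
diff below, while the exact placement and surrounding context in each
file is an assumption:

/* mm/slab.h (assumed placement): shared with other slab internals */
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

/* mm/memcontrol.c (assumed placement): moves with the memcg hooks */
static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}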
Link: https://lkml.kernel.org/r/20240326-slab-memcg-v3-2-d85d2563287a@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Kees Cook <kees@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat
-rw-r--r-- | mm/slub.c | 103
1 file changed, 2 insertions(+), 101 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f83efdbe15b7..3e33ff900d35 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1859,12 +1859,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 #endif
 #endif /* CONFIG_SLUB_DEBUG */
 
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
-        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
@@ -1923,8 +1917,8 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | \
                                 __GFP_ACCOUNT | __GFP_NOFAIL)
 
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-                               gfp_t gfp, bool new_slab)
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+                        gfp_t gfp, bool new_slab)
 {
         unsigned int objects = objs_per_slab(s, slab);
         unsigned long new_exts;
@@ -2083,78 +2077,6 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEMCG_KMEM
 
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
-        /*
-         * For each accounted object there is an extra space which is used
-         * to store obj_cgroup membership. Charge it too.
-         */
-        return s->size + sizeof(struct obj_cgroup *);
-}
-
-static bool __memcg_slab_post_alloc_hook(struct kmem_cache *s,
-                                         struct list_lru *lru,
-                                         gfp_t flags, size_t size,
-                                         void **p)
-{
-        struct obj_cgroup *objcg;
-        struct slab *slab;
-        unsigned long off;
-        size_t i;
-
-        /*
-         * The obtained objcg pointer is safe to use within the current scope,
-         * defined by current task or set_active_memcg() pair.
-         * obj_cgroup_get() is used to get a permanent reference.
-         */
-        objcg = current_obj_cgroup();
-        if (!objcg)
-                return true;
-
-        /*
-         * slab_alloc_node() avoids the NULL check, so we might be called with a
-         * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
-         * the whole requested size.
-         * return success as there's nothing to free back
-         */
-        if (unlikely(*p == NULL))
-                return true;
-
-        flags &= gfp_allowed_mask;
-
-        if (lru) {
-                int ret;
-                struct mem_cgroup *memcg;
-
-                memcg = get_mem_cgroup_from_objcg(objcg);
-                ret = memcg_list_lru_alloc(memcg, lru, flags);
-                css_put(&memcg->css);
-
-                if (ret)
-                        return false;
-        }
-
-        if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
-                return false;
-
-        for (i = 0; i < size; i++) {
-                slab = virt_to_slab(p[i]);
-
-                if (!slab_obj_exts(slab) &&
-                    alloc_slab_obj_exts(slab, s, flags, false)) {
-                        obj_cgroup_uncharge(objcg, obj_full_size(s));
-                        continue;
-                }
-
-                off = obj_to_index(s, slab, p[i]);
-                obj_cgroup_get(objcg);
-                slab_obj_exts(slab)[off].objcg = objcg;
-                mod_objcg_state(objcg, slab_pgdat(slab),
-                                cache_vmstat_idx(s), obj_full_size(s));
-        }
-
-        return true;
-}
-
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
@@ -2181,27 +2103,6 @@ bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
         return false;
 }
 
-static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-                                   void **p, int objects,
-                                   struct slabobj_ext *obj_exts)
-{
-        for (int i = 0; i < objects; i++) {
-                struct obj_cgroup *objcg;
-                unsigned int off;
-
-                off = obj_to_index(s, slab, p[i]);
-                objcg = obj_exts[off].objcg;
-                if (!objcg)
-                        continue;
-
-                obj_exts[off].objcg = NULL;
-                obj_cgroup_uncharge(objcg, obj_full_size(s));
-                mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
-                                -obj_full_size(s));
-                obj_cgroup_put(objcg);
-        }
-}
-
 static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
                           int objects)
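Since alloc_slab_obj_exts() loses its static qualifier in the hunk above,
the moved hooks in mm/memcontrol.c need a way to reach it; a minimal
sketch of the corresponding declaration, assuming it lands in the
internal mm/slab.h:

/* mm/slab.h (assumed): make the SLUB helper visible to mm/memcontrol.c */
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                        gfp_t gfp, bool new_slab);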