author     Yafang Shao <laoar.shao@gmail.com>          2023-02-14 16:35:49 +0100
committer  Andrew Morton <akpm@linux-foundation.org>   2023-02-17 05:43:55 +0100
commit     2ef8ed7ddd2e6e69da7802be51af8ad71326a74f (patch)
tree       abf089b7cce04ead6f17bb66efa8c03257a6edd3 /mm/percpu-internal.h
parent     maple_tree: reduce stack usage with gcc-9 and earlier (diff)
download   linux-2ef8ed7ddd2e6e69da7802be51af8ad71326a74f.tar.xz
           linux-2ef8ed7ddd2e6e69da7802be51af8ad71326a74f.zip
mm: percpu: fix incorrect size in pcpu_obj_full_size()
The extra space used to store the obj_cgroup membership is only valid when kmemcg is enabled; kmemcg can be disabled at boot with the kernel parameter "cgroup.memory=nokmem".  This helper is also used in non-memcg code, for example in the tracepoint, so it should be fixed.

Found by code review while implementing bpf memory usage[1].  No real issue has been observed in production environments.

[1]. https://lwn.net/Articles/921991/

Link: https://lkml.kernel.org/r/20230214153549.12291-1-laoar.shao@gmail.com
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Vasily Averin <vvs@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
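Below is a minimal userspace sketch (not the kernel implementation) that only illustrates the effect of the fix: the obj_cgroup bookkeeping space is charged only when kmemcg is active. PCPU_MIN_ALLOC_SIZE, nr_cpus and kmem_disabled are illustrative stand-ins for the kernel's PCPU_MIN_ALLOC_SIZE, num_possible_cpus() and mem_cgroup_kmem_disabled().

/*
 * Userspace sketch of the fixed size calculation; values are stand-ins,
 * not the kernel's real constants or helpers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PCPU_MIN_ALLOC_SIZE 4		/* stand-in value */
static unsigned int nr_cpus = 8;	/* stand-in for num_possible_cpus() */
static bool kmem_disabled;		/* stand-in for mem_cgroup_kmem_disabled() */

static size_t pcpu_obj_full_size(size_t size)
{
	size_t extra_size = 0;

	/* After the fix: charge the obj_cgroup pointers only when kmemcg is active. */
	if (!kmem_disabled)
		extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(void *);

	return size * nr_cpus + extra_size;
}

int main(void)
{
	kmem_disabled = false;		/* kmemcg enabled (default) */
	printf("kmemcg enabled : %zu bytes\n", pcpu_obj_full_size(64));

	kmem_disabled = true;		/* e.g. booted with cgroup.memory=nokmem */
	printf("kmemcg disabled: %zu bytes\n", pcpu_obj_full_size(64));
	return 0;
}

With these stand-in values and a 64-byte request, the sketch reports 640 bytes when kmemcg is enabled (64 * 8 plus 16 obj_cgroup pointers of 8 bytes on a 64-bit build) and 512 bytes when it is disabled, which is the over-accounting the patch removes.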
Diffstat (limited to 'mm/percpu-internal.h')
-rw-r--r--  mm/percpu-internal.h  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 70b1ea23f4d2..f9847c131998 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/memcontrol.h>
 
 /*
  * pcpu_block_md is the metadata block struct.
@@ -118,14 +119,15 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  * @size: size of area to allocate in bytes
  *
  * For each accounted object there is an extra space which is used to store
- * obj_cgroup membership. Charge it too.
+ * obj_cgroup membership if kmemcg is not disabled. Charge it too.
  */
 static inline size_t pcpu_obj_full_size(size_t size)
 {
 	size_t extra_size = 0;
 
 #ifdef CONFIG_MEMCG_KMEM
-	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+	if (!mem_cgroup_kmem_disabled())
+		extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
 #endif
 
 	return size * num_possible_cpus() + extra_size;