author | Johannes Weiner <hannes@cmpxchg.org> | 2022-05-19 23:08:53 +0200
committer | akpm <akpm@linux-foundation.org> | 2022-05-19 23:08:53 +0200
commit | f4840ccfca25db225b3371a8f7b5770febee87c5 (patch)
tree | c35709fc53e78d36681620d17d410bcc0c5d5785 /mm/memcontrol.c
parent | mm: zswap: add basic meminfo and vmstat coverage (diff)
zswap: memcg accounting
Applications can currently escape their cgroup memory containment when
zswap is enabled. This patch adds per-cgroup tracking and limiting of
zswap backend memory to rectify this.
The existing cgroup2 memory.stat file is extended to show zswap statistics
analogous to what's in meminfo and vmstat. Furthermore, two new control
files, memory.zswap.current and memory.zswap.max, are added to allow
tuning zswap usage on a per-workload basis. This is important since not
all workloads benefit from zswap equally; some even suffer compared to
disk swap when memory contents don't compress well. The optimal size of
the zswap pool, and the threshold for writeback, also depend on the size
of the workload's warm set.
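The new knobs follow the usual cgroup2 conventions: zswap_max_write() parses
input with page_counter_memparse(), so both plain byte counts and the string
"max" are accepted. A minimal userspace sketch of tuning a workload, assuming
cgroup2 is mounted at /sys/fs/cgroup and a hypothetical group named "job":

/* Sketch only: the mount point and group name "job" are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Cap the group's zswap pool at 512M; writing "max" removes the cap. */
	fd = open("/sys/fs/cgroup/job/memory.zswap.max", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "536870912", strlen("536870912")) < 0)
		perror("write");
	close(fd);

	/* memory.zswap.current reports the compressed backend bytes in use. */
	fd = open("/sys/fs/cgroup/job/memory.zswap.current", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("zswap backend bytes: %s", buf);
	}
	close(fd);
	return 0;
}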
The implementation doesn't use a traditional page_counter transaction.
zswap is unconventional as a memory consumer in that we only know the
amount of memory to charge once expensive compression has occurred. If
zswap is disabled or the limit is already exceeded, we obviously don't want
to compress page upon page only to reject them all. Instead, the limit is
checked against current usage, then we compress and charge. This allows
some limit overrun, but not enough to matter in practice.
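Concretely, the check-then-charge sequence on the store side looks roughly
like the following sketch. The real caller lives in mm/zswap.c and is outside
this diffstat; compress_page() is a hypothetical stand-in for the compression
step, and the real code keeps the objcg reference in the zswap entry so it can
uncharge on swapin.

static int zswap_store_sketch(struct page *page)
{
	struct obj_cgroup *objcg;
	unsigned int clen;
	void *cbuf;

	objcg = get_obj_cgroup_from_page(page);

	/* Optimistic pre-check against current usage, before compressing. */
	if (objcg && !obj_cgroup_may_zswap(objcg))
		goto reject;

	/* Only now pay for compression; the charge size is known afterwards. */
	if (compress_page(page, &cbuf, &clen))	/* illustrative helper */
		goto reject;

	/* Forced charge: allows some bounded overrun past the limit. */
	if (objcg) {
		obj_cgroup_charge_zswap(objcg, clen);
		obj_cgroup_put(objcg);
	}
	return 0;
reject:
	if (objcg)
		obj_cgroup_put(objcg);
	return -ENOMEM;
}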
[hannes@cmpxchg.org: fix for CONFIG_SLOB builds]
Link: https://lkml.kernel.org/r/YnwD14zxYjUJPc2w@cmpxchg.org
[hannes@cmpxchg.org: opt out of cgroups v1]
Link: https://lkml.kernel.org/r/Yn6it9mBYFA+/lTb@cmpxchg.org
Link: https://lkml.kernel.org/r/20220510152847.230957-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 205
1 file changed, 197 insertions, 8 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ef76df7c6d12..abec50f31fe6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1398,6 +1398,10 @@ static const struct memory_stat memory_stats[] = {
 	{ "sock", MEMCG_SOCK },
 	{ "vmalloc", MEMCG_VMALLOC },
 	{ "shmem", NR_SHMEM },
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	{ "zswap", MEMCG_ZSWAP_B },
+	{ "zswapped", MEMCG_ZSWAPPED },
+#endif
 	{ "file_mapped", NR_FILE_MAPPED },
 	{ "file_dirty", NR_FILE_DIRTY },
 	{ "file_writeback", NR_WRITEBACK },
@@ -1432,6 +1436,7 @@ static int memcg_page_state_unit(int item)
 {
 	switch (item) {
 	case MEMCG_PERCPU_B:
+	case MEMCG_ZSWAP_B:
 	case NR_SLAB_RECLAIMABLE_B:
 	case NR_SLAB_UNRECLAIMABLE_B:
 	case WORKINGSET_REFAULT_ANON:
@@ -1512,6 +1517,13 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
 		       memcg_events(memcg, PGLAZYFREED));
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPIN),
+		       memcg_events(memcg, ZSWPIN));
+	seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPOUT),
+		       memcg_events(memcg, ZSWPOUT));
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
 		       memcg_events(memcg, THP_FAULT_ALLOC));
@@ -2883,6 +2895,19 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
 	return page_memcg_check(folio_page(folio, 0));
 }
 
+static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
+{
+	struct obj_cgroup *objcg = NULL;
+
+	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
+		objcg = rcu_dereference(memcg->objcg);
+		if (objcg && obj_cgroup_tryget(objcg))
+			break;
+		objcg = NULL;
+	}
+	return objcg;
+}
+
 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
 {
 	struct obj_cgroup *objcg = NULL;
@@ -2896,15 +2921,32 @@ __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
 		memcg = active_memcg();
 	else
 		memcg = mem_cgroup_from_task(current);
-
-	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
-		objcg = rcu_dereference(memcg->objcg);
-		if (objcg && obj_cgroup_tryget(objcg))
-			break;
-		objcg = NULL;
-	}
+	objcg = __get_obj_cgroup_from_memcg(memcg);
 	rcu_read_unlock();
+	return objcg;
+}
+
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+	struct obj_cgroup *objcg;
+
+	if (!memcg_kmem_enabled() || memcg_kmem_bypass())
+		return NULL;
+
+	if (PageMemcgKmem(page)) {
+		objcg = __folio_objcg(page_folio(page));
+		obj_cgroup_get(objcg);
+	} else {
+		struct mem_cgroup *memcg;
 
+		rcu_read_lock();
+		memcg = __folio_memcg(page_folio(page));
+		if (memcg)
+			objcg = __get_obj_cgroup_from_memcg(memcg);
+		else
+			objcg = NULL;
+		rcu_read_unlock();
+	}
 	return objcg;
 }
 
@@ -5142,6 +5184,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 		page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 		memcg->soft_limit = PAGE_COUNTER_MAX;
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+		memcg->zswap_max = PAGE_COUNTER_MAX;
+#endif
 		page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
 		if (parent) {
 			memcg->swappiness = mem_cgroup_swappiness(parent);
@@ -7421,6 +7466,148 @@ static struct cftype memsw_files[] = {
 	{ },	/* terminate */
 };
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+/**
+ * obj_cgroup_may_zswap - check if this cgroup can zswap
+ * @objcg: the object cgroup
+ *
+ * Check if the hierarchical zswap limit has been reached.
+ *
+ * This doesn't check for specific headroom, and it is not atomic
+ * either. But with zswap, the size of the allocation is only known
+ * once compression has occurred, and this optimistic pre-check avoids
+ * spending cycles on compression when there is already no room left
+ * or zswap is disabled altogether somewhere in the hierarchy.
+ */
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+	struct mem_cgroup *memcg, *original_memcg;
+	bool ret = true;
+
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return true;
+
+	original_memcg = get_mem_cgroup_from_objcg(objcg);
+	for (memcg = original_memcg; memcg != root_mem_cgroup;
+	     memcg = parent_mem_cgroup(memcg)) {
+		unsigned long max = READ_ONCE(memcg->zswap_max);
+		unsigned long pages;
+
+		if (max == PAGE_COUNTER_MAX)
+			continue;
+		if (max == 0) {
+			ret = false;
+			break;
+		}
+
+		cgroup_rstat_flush(memcg->css.cgroup);
+		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
+		if (pages < max)
+			continue;
+		ret = false;
+		break;
+	}
+	mem_cgroup_put(original_memcg);
+	return ret;
+}
+
+/**
+ * obj_cgroup_charge_zswap - charge compression backend memory
+ * @objcg: the object cgroup
+ * @size: size of compressed object
+ *
+ * This forces the charge after obj_cgroup_may_zswap() allowed
+ * compression and storage in zswap for this cgroup to go ahead.
+ */
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
+{
+	struct mem_cgroup *memcg;
+
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return;
+
+	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
+
+	/* PF_MEMALLOC context, charging must succeed */
+	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
+		VM_WARN_ON_ONCE(1);
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
+	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
+	rcu_read_unlock();
+}
+
+/**
+ * obj_cgroup_uncharge_zswap - uncharge compression backend memory
+ * @objcg: the object cgroup
+ * @size: size of compressed object
+ *
+ * Uncharges zswap memory on page in.
+ */
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
+{
+	struct mem_cgroup *memcg;
+
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return;
+
+	obj_cgroup_uncharge(objcg, size);
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
+	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
+	rcu_read_unlock();
+}
+
+static u64 zswap_current_read(struct cgroup_subsys_state *css,
+			      struct cftype *cft)
+{
+	cgroup_rstat_flush(css->cgroup);
+	return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
+}
+
+static int zswap_max_show(struct seq_file *m, void *v)
+{
+	return seq_puts_memcg_tunable(m,
+		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
+}
+
+static ssize_t zswap_max_write(struct kernfs_open_file *of,
+			       char *buf, size_t nbytes, loff_t off)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+	unsigned long max;
+	int err;
+
+	buf = strstrip(buf);
+	err = page_counter_memparse(buf, "max", &max);
+	if (err)
+		return err;
+
+	xchg(&memcg->zswap_max, max);
+
+	return nbytes;
+}
+
+static struct cftype zswap_files[] = {
+	{
+		.name = "zswap.current",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = zswap_current_read,
+	},
+	{
+		.name = "zswap.max",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = zswap_max_show,
+		.write = zswap_max_write,
+	},
+	{ }	/* terminate */
+};
+#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
+
 /*
  * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
  * instead of a core_initcall(), this could mean cgroup_memory_noswap still
@@ -7439,7 +7626,9 @@ static int __init mem_cgroup_swap_init(void)
 
 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
-
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
+#endif
 	return 0;
 }
 core_initcall(mem_cgroup_swap_init);