author     Shakeel Butt <shakeel.butt@linux.dev>        2024-08-15 07:04:50 +0200
committer  Andrew Morton <akpm@linux-foundation.org>    2024-09-02 05:26:19 +0200
commit     f7d49ba03ae7555e6337b9d39eb080b4a064eab8
tree       d9522e0ddfe8b94961b964e1edb489f44fb648d7 /mm/memcontrol.c
parent     memcg: move mem_cgroup_charge_statistics to v1 code
memcg: move v1 events and statistics code to v1 file
Currently the common code paths for charge commit, swapout, and batched
uncharge execute v1-only code, which is completely useless for v2
deployments where CONFIG_MEMCG_V1 is disabled. In addition, this code
mucks with IRQs, which might be slow on some architectures. Let's move
all of it into the v1-only code and remove it from v2-only deployments.
Link: https://lkml.kernel.org/r/20240815050453.1298138-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--   mm/memcontrol.c   33
1 file changed, 4 insertions(+), 29 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 059c9e0ba350..6b4b51caf71b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2351,11 +2351,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 {
 	css_get(&memcg->css);
 	commit_charge(folio, memcg);
-
-	local_irq_disable();
-	memcg1_charge_statistics(memcg, folio_nr_pages(folio));
-	memcg1_check_events(memcg, folio_nid(folio));
-	local_irq_enable();
+	memcg1_commit_charge(folio, memcg);
 }
 
 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
@@ -4575,8 +4571,6 @@ static inline void uncharge_gather_clear(struct uncharge_gather *ug)
 
 static void uncharge_batch(const struct uncharge_gather *ug)
 {
-	unsigned long flags;
-
 	if (ug->nr_memory) {
 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
 		if (do_memsw_account())
@@ -4588,11 +4582,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 		memcg1_oom_recover(ug->memcg);
 	}
 
-	local_irq_save(flags);
-	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
-	__this_cpu_add(ug->memcg->events_percpu->nr_page_events, ug->nr_memory);
-	memcg1_check_events(ug->memcg, ug->nid);
-	local_irq_restore(flags);
+	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
 
 	/* drop reference from uncharge_folio */
 	css_put(&ug->memcg->css);
@@ -4699,7 +4689,6 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 {
 	struct mem_cgroup *memcg;
 	long nr_pages = folio_nr_pages(new);
-	unsigned long flags;
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
@@ -4727,11 +4716,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 
 	css_get(&memcg->css);
 	commit_charge(new, memcg);
-
-	local_irq_save(flags);
-	memcg1_charge_statistics(memcg, nr_pages);
-	memcg1_check_events(memcg, folio_nid(new));
-	local_irq_restore(flags);
+	memcg1_commit_charge(new, memcg);
 }
 
 /**
@@ -4967,17 +4952,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 			page_counter_uncharge(&memcg->memsw, nr_entries);
 	}
 
-	/*
-	 * Interrupts should be disabled here because the caller holds the
-	 * i_pages lock which is taken with interrupts-off. It is
-	 * important here to have the interrupts disabled because it is the
-	 * only synchronisation we have for updating the per-CPU variables.
-	 */
-	memcg_stats_lock();
-	memcg1_charge_statistics(memcg, -nr_entries);
-	memcg_stats_unlock();
-	memcg1_check_events(memcg, folio_nid(folio));
-
+	memcg1_swapout(folio, memcg);
 	css_put(&memcg->css);
 }
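Because this view is limited to mm/memcontrol.c, the new memcg1_*() helpers themselves are not shown; the same patch adds them to the v1-only file (presumably mm/memcontrol-v1.c, per the commit subject). As a rough sketch, not the authoritative definitions, their bodies can be reconstructed from the code removed above, with signatures inferred from the new call sites:

/*
 * Sketch of the v1-side helpers, reconstructed from the code removed
 * above. The real definitions live in the v1-only file in the full
 * patch and may differ in detail.
 */

/* Called from mem_cgroup_commit_charge() and mem_cgroup_replace_folio(). */
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long flags;

	local_irq_save(flags);
	memcg1_charge_statistics(memcg, folio_nr_pages(folio));
	memcg1_check_events(memcg, folio_nid(folio));
	local_irq_restore(flags);
}

/* Called from uncharge_batch(); takes the gathered counts directly. */
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_memory, int nid)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, PGPGOUT, pgpgout);
	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
	memcg1_check_events(memcg, nid);
	local_irq_restore(flags);
}

/* Called from mem_cgroup_swapout() with the i_pages lock held (IRQs off). */
void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
{
	/*
	 * Interrupts are already disabled by the caller, which is the
	 * only synchronisation for the per-CPU counters (see the comment
	 * removed above).
	 */
	memcg_stats_lock();
	memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
	memcg_stats_unlock();
	memcg1_check_events(memcg, folio_nid(folio));
}

Note that the charge-commit path previously used local_irq_disable()/local_irq_enable() while mem_cgroup_replace_folio() used the save/restore variant; a single helper serving both callers would use save/restore, as sketched. For v2-only builds (CONFIG_MEMCG_V1=n), the usual kernel pattern, and presumably what the header counterpart of this patch does, is to provide empty static inline stubs so the calls above compile away entirely, removing both the v1 bookkeeping and the IRQ fiddling from the v2 fast paths.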