author     Roman Gushchin <guro@fb.com>                    2020-04-02 06:06:56 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 18:35:28 +0200
commit     4b13f64de25686583db3e359b1b8e59049278b50 (patch)
tree       01c162d78f334e1c650f6549df41c3e9be3d7f2a /mm
parent     mm: memcg/slab: cache page number in memcg_(un)charge_slab() (diff)
mm: kmem: rename (__)memcg_kmem_(un)charge_memcg() to __memcg_kmem_(un)charge()
Drop the _memcg suffix from the (__)memcg_kmem_(un)charge functions. It's
shorter and more obvious. These are the most basic functions, which just
(un)charge the given cgroup by the given number of pages. Also fix up the
corresponding comments.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Link: http://lkml.kernel.org/r/20200109202659.752357-7-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
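For illustration only, not part of this patch: a minimal sketch of how a
caller might use the renamed pair, based on the signatures added in the diff
below (__memcg_kmem_charge(memcg, gfp, nr_pages) and
__memcg_kmem_uncharge(memcg, nr_pages)) and assuming the declarations live in
linux/memcontrol.h. The example_* helper names are hypothetical.

#include <linux/memcontrol.h>

/*
 * Hypothetical helpers, sketched only to show the renamed API in use;
 * they are not part of this patch. The root-memcg checks mirror the
 * callers in the diff below, which skip accounting for the root cgroup.
 */
static int example_charge_kernel_pages(struct mem_cgroup *memcg,
				       gfp_t gfp, int order)
{
	if (mem_cgroup_is_root(memcg))
		return 0;
	/* Charge 1 << order kernel pages to the given memcg. */
	return __memcg_kmem_charge(memcg, gfp, 1 << order);
}

static void example_uncharge_kernel_pages(struct mem_cgroup *memcg,
					  int order)
{
	if (!mem_cgroup_is_root(memcg))
		/* Return the same number of pages to the memcg's counters. */
		__memcg_kmem_uncharge(memcg, 1 << order);
}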
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 40
-rw-r--r--  mm/slab.h       |  4
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a00866981175..e6043ab3017c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2881,15 +2881,15 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
}
/**
- * __memcg_kmem_charge_memcg: charge a kmem page
+ * __memcg_kmem_charge: charge a number of kernel pages to a memcg
* @memcg: memory cgroup to charge
* @gfp: reclaim mode
* @nr_pages: number of pages to charge
*
* Returns 0 on success, an error code on failure.
*/
-int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned int nr_pages)
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned int nr_pages)
{
struct page_counter *counter;
int ret;
@@ -2917,6 +2917,21 @@ int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
}
/**
+ * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
+ * @memcg: memcg to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ page_counter_uncharge(&memcg->kmem, nr_pages);
+
+ page_counter_uncharge(&memcg->memory, nr_pages);
+ if (do_memsw_account())
+ page_counter_uncharge(&memcg->memsw, nr_pages);
+}
+
+/**
* __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
* @page: page to charge
* @gfp: reclaim mode
@@ -2934,7 +2949,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
memcg = get_mem_cgroup_from_current();
if (!mem_cgroup_is_root(memcg)) {
- ret = __memcg_kmem_charge_memcg(memcg, gfp, 1 << order);
+ ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
if (!ret) {
page->mem_cgroup = memcg;
__SetPageKmemcg(page);
@@ -2945,21 +2960,6 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
}
/**
- * __memcg_kmem_uncharge_memcg: uncharge a kmem page
- * @memcg: memcg to uncharge
- * @nr_pages: number of pages to uncharge
- */
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- page_counter_uncharge(&memcg->kmem, nr_pages);
-
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
-}
-/**
* __memcg_kmem_uncharge_page: uncharge a kmem page
* @page: page to uncharge
* @order: allocation order
@@ -2973,7 +2973,7 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
return;
VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
- __memcg_kmem_uncharge_memcg(memcg, nr_pages);
+ __memcg_kmem_uncharge(memcg, nr_pages);
page->mem_cgroup = NULL;
/* slab pages do not have PageKmemcg flag set */
diff --git a/mm/slab.h b/mm/slab.h
index 43f8ce4aa325..207c83ef6e06 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -366,7 +366,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
return 0;
}
- ret = memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
+ ret = memcg_kmem_charge(memcg, gfp, nr_pages);
if (ret)
goto out;
@@ -397,7 +397,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
if (likely(!mem_cgroup_is_root(memcg))) {
lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
- memcg_kmem_uncharge_memcg(memcg, nr_pages);
+ memcg_kmem_uncharge(memcg, nr_pages);
} else {
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-nr_pages);