author		Shakeel Butt <shakeelb@google.com>	2020-06-04 01:03:19 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-04 05:09:49 +0200
commit		21e330fc632d6a288f73de48045b782cc51d501a (patch)
tree		545124044848862e471adc9967b14327f50ca779 /mm/swap.c
parent		mm: swap: fix vmstats for huge pages (diff)
mm: swap: memcg: fix memcg stats for huge pages
Commit 2262185c5b28 ("mm: per-cgroup memory reclaim stats") added PGLAZYFREE,
PGACTIVATE and PGDEACTIVATE stats for cgroups, but missed a couple of places,
and the PGLAZYFREE accounting missed huge page handling. Fix that. Also, for
PGLAZYFREE, switch to the irq-unsafe update function, since irqs are already
disabled at these call sites.

Fixes: 2262185c5b28 ("mm: per-cgroup memory reclaim stats")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182947.251343-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
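For context, count_memcg_page_event(), which the PGLAZYFREE hunk replaces,
always counts a single event per page and wraps the update in
local_irq_save()/local_irq_restore(). A simplified sketch of the v5.7-era
helpers (abridged from include/linux/memcontrol.h; not verbatim) shows why
calling __count_memcg_events() with the hpage_nr_pages() count is both
correct for huge pages and cheaper when irqs are already off:

	/* Abridged sketch, circa v5.7; details elided. */
	static inline void count_memcg_events(struct mem_cgroup *memcg,
					      enum vm_event_item idx,
					      unsigned long count)
	{
		unsigned long flags;

		local_irq_save(flags);	/* redundant if irqs are already disabled */
		__count_memcg_events(memcg, idx, count);
		local_irq_restore(flags);
	}

	static inline void count_memcg_page_event(struct page *page,
						  enum vm_event_item idx)
	{
		if (page->mem_cgroup)
			count_memcg_events(page->mem_cgroup, idx, 1);	/* counts 1, even for a THP */
	}

The patched call sites run with interrupts disabled (see the note after the
diff), so they can use the __-prefixed variant directly and pass the compound
page's true page count.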
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index fa07d31184ca..dbcab84c6fce 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,6 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int lru = page_lru_base_type(page);
+		int nr_pages = hpage_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
@@ -327,7 +328,9 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, lru);
 		trace_mm_lru_activate(page);
 
-		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
+		__count_vm_events(PGACTIVATE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
+				     nr_pages);
 	}
 }
 
@@ -565,8 +568,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		__count_vm_events(PGROTATED, nr_pages);
 	}
 
-	if (active)
+	if (active) {
 		__count_vm_events(PGDEACTIVATE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+				     nr_pages);
+	}
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -574,13 +580,16 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 		int lru = page_lru_base_type(page);
+		int nr_pages = hpage_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		add_page_to_lru_list(page, lruvec, lru);
 
-		__count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
+		__count_vm_events(PGDEACTIVATE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+				     nr_pages);
 	}
 }
 
@@ -590,6 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		bool active = PageActive(page);
+		int nr_pages = hpage_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec,
 				       LRU_INACTIVE_ANON + active);
@@ -603,8 +613,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 		ClearPageSwapBacked(page);
 		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
-		count_memcg_page_event(page, PGLAZYFREE);
+		__count_vm_events(PGLAZYFREE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
+				     nr_pages);
 	}
 }
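The lru_*_fn callbacks above are invoked from pagevec draining, which holds
the node's lru_lock with interrupts disabled across each batch; that is what
makes the irq-unsafe __count_memcg_events() safe here. A simplified sketch of
the v5.7-era driver loop (abridged from pagevec_lru_move_fn() in mm/swap.c;
not verbatim):

	static void pagevec_lru_move_fn(struct pagevec *pvec,
		void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
		void *arg)
	{
		int i;
		struct pglist_data *pgdat = NULL;
		struct lruvec *lruvec;
		unsigned long flags = 0;

		for (i = 0; i < pagevec_count(pvec); i++) {
			struct page *page = pvec->pages[i];
			struct pglist_data *pagepgdat = page_pgdat(page);

			/* Take each node's LRU lock once per run of same-node pages. */
			if (pagepgdat != pgdat) {
				if (pgdat)
					spin_unlock_irqrestore(&pgdat->lru_lock, flags);
				pgdat = pagepgdat;
				spin_lock_irqsave(&pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, pgdat);
			/* irqs are off here, so move_fn() may use __count_*() helpers. */
			(*move_fn)(page, lruvec, arg);
		}
		if (pgdat)
			spin_unlock_irqrestore(&pgdat->lru_lock, flags);
		release_pages(pvec->pages, pagevec_count(pvec));
		pagevec_reinit(pvec);
	}

Either way the counters are bumped with interrupts already disabled, which is
why the patch can drop the redundant save/restore that count_memcg_page_event()
would have performed.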