author		Shakeel Butt <shakeelb@google.com>	2020-06-04 01:03:16 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-04 05:09:49 +0200
commit		5d91f31faf8ebed2acfc3a1d6ac344f95c488d66
tree		f877c2ae5d1551b93064bcb08012b2c806a91d61 /mm/swap.c
parent		mm: vmscan: limit the range of LRU type balancing
mm: swap: fix vmstats for huge pages
Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also make
__pagevec_lru_add_fn() use the irq-unsafe alternative to update the
stats, as irqs are already disabled there.
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182916.249910-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
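To make the accounting problem concrete, here is a minimal userspace sketch of the idiom the patch applies. This is not kernel code: struct page, hpage_nr_pages() and __count_vm_events() below are simplified stand-ins for the real kernel definitions. The point is that a huge page sits on the LRU as a single struct page but represents hpage_nr_pages() base pages, so each vmstat event must be bumped by that count rather than by 1.

#include <stdio.h>

enum vm_event_item { PGACTIVATE, PGDEACTIVATE, NR_VM_EVENT_ITEMS };

static unsigned long vm_events[NR_VM_EVENT_ITEMS];

struct page {
	int is_huge;		/* models PageTransHuge(page) */
	int compound_order;	/* log2 of the number of base pages */
};

/* Stand-in for hpage_nr_pages(): 1 for a base page, 2^order for a THP. */
static int hpage_nr_pages(struct page *page)
{
	return page->is_huge ? 1 << page->compound_order : 1;
}

/* Stand-in for __count_vm_events(): bump an event counter by a delta. */
static void __count_vm_events(enum vm_event_item item, long delta)
{
	vm_events[item] += delta;
}

int main(void)
{
	/* A 2MB THP on x86-64: order 9, i.e. 512 base pages. */
	struct page thp = { .is_huge = 1, .compound_order = 9 };

	/* Pre-patch code did __count_vm_event(PGACTIVATE), adding only 1. */
	__count_vm_events(PGACTIVATE, hpage_nr_pages(&thp));

	printf("PGACTIVATE = %lu\n", vm_events[PGACTIVATE]); /* prints 512 */
	return 0;
}

In the kernel, the double-underscore __count_vm_events() is the irq-unsafe variant of count_vm_events(); the patch can use it in these callbacks only because they already run with interrupts disabled, as the sketch after the diff illustrates.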
Diffstat
 mm/swap.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 343675d629ae..fa07d31184ca 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-		(*pgmoved)++;
+		(*pgmoved) += hpage_nr_pages(page);
 	}
 }
 
@@ -327,7 +327,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, lru);
 		trace_mm_lru_activate(page);
 
-		__count_vm_event(PGACTIVATE);
+		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
 	}
 }
 
@@ -529,6 +529,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
 	int lru;
 	bool active;
+	int nr_pages = hpage_nr_pages(page);
 
 	if (!PageLRU(page))
 		return;
@@ -561,11 +562,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		 * We moves tha page into tail of inactive.
 		 */
 		add_page_to_lru_list_tail(page, lruvec, lru);
-		__count_vm_event(PGROTATED);
+		__count_vm_events(PGROTATED, nr_pages);
 	}
 
 	if (active)
-		__count_vm_event(PGDEACTIVATE);
+		__count_vm_events(PGDEACTIVATE, nr_pages);
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -960,6 +961,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
 	enum lru_list lru;
 	int was_unevictable = TestClearPageUnevictable(page);
+	int nr_pages = hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -995,13 +997,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	if (page_evictable(page)) {
 		lru = page_lru(page);
 		if (was_unevictable)
-			count_vm_event(UNEVICTABLE_PGRESCUED);
+			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	} else {
 		lru = LRU_UNEVICTABLE;
 		ClearPageActive(page);
 		SetPageUnevictable(page);
 		if (!was_unevictable)
-			count_vm_event(UNEVICTABLE_PGCULLED);
+			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 	}
 	add_page_to_lru_list(page, lruvec, lru);
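For context on why the irq-unsafe counters are legitimate in these callbacks: pagevec_lru_move_fn() takes the LRU spinlock with spin_lock_irqsave() around every callback invocation, so interrupts are off while the per-CPU counters are updated. The sketch below is a simplified userspace model of that calling pattern, not the actual kernel implementation; the lock and irq state are mocked, and the names only mirror the kernel's.

#include <stdio.h>
#include <stdbool.h>

/* Mocked irq state; stands in for the CPU interrupt flag. */
static bool irqs_off;

static void spin_lock_irqsave_mock(void)      { irqs_off = true;  }
static void spin_unlock_irqrestore_mock(void) { irqs_off = false; }

struct page { int id; };

typedef void (*move_fn_t)(struct page *page);

/*
 * Models the shape of pagevec_lru_move_fn(): the LRU lock is taken
 * with irqs disabled before any callback runs, and released after.
 */
static void pagevec_lru_move_fn_model(struct page *pages, int nr,
				      move_fn_t move_fn)
{
	spin_lock_irqsave_mock();
	for (int i = 0; i < nr; i++)
		move_fn(&pages[i]);	/* every callback sees irqs off */
	spin_unlock_irqrestore_mock();
}

/* A callback may therefore use irq-unsafe counter updates. */
static void callback(struct page *page)
{
	printf("page %d: irqs disabled = %s\n", page->id,
	       irqs_off ? "yes" : "no");
}

int main(void)
{
	struct page pages[] = { { .id = 0 }, { .id = 1 } };

	pagevec_lru_move_fn_model(pages, 2, callback);
	return 0;
}

Since __pagevec_lru_add_fn() is only ever invoked through this path, the patch can safely replace its count_vm_event() calls with the __-prefixed variant in addition to fixing the huge-page counts.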