| author | Kirill Tkhai <ktkhai@virtuozzo.com> | 2019-05-14 02:16:51 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-14 18:47:45 +0200 |
| commit | 886cf1901db962cee5f8b82b9b260079a5e8a4eb (patch) | |
| tree | 53edc7213da3403ed7b201d08e448719c352bc19 /mm/vmscan.c | |
| parent | mm, page_alloc: disallow __GFP_COMP in alloc_pages_exact() (diff) | |
| download | linux-886cf1901db962cee5f8b82b9b260079a5e8a4eb.tar.xz linux-886cf1901db962cee5f8b82b9b260079a5e8a4eb.zip | |
mm: move recent_rotated pages calculation to shrink_inactive_list()
Patch series "mm: Generalize putback functions".
putback_inactive_pages() and move_active_pages_to_lru() are very
similar, so this patchset merges them into a single function.
This patch (of 4):
This patch moves the recent_rotated[] calculation from
putback_inactive_pages() to shrink_inactive_list(). This makes
putback_inactive_pages() look more similar to move_active_pages_to_lru().
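In outline, the accounting moves as below (a condensed sketch of the change, not standalone compilable code; see the full diff at the end):

```c
/* Before: putback_inactive_pages() rederived the LRU type per page. */
if (is_active_lru(lru)) {
	int file = is_file_lru(lru);
	reclaim_stat->recent_rotated[file] += hpage_nr_pages(page);
}

/* After: shrink_page_list() records activations in stat->nr_activate[],
 * and shrink_inactive_list() folds them into recent_rotated[] once. */
reclaim_stat->recent_rotated[0] = stat.nr_activate[0];	/* anon */
reclaim_stat->recent_rotated[1] = stat.nr_activate[1];	/* file */
```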
To do that, we account activated pages in reclaim_stat::nr_activate.
Since a page may change its LRU type from anon to file cache inside
shrink_page_list() (see ClearPageSwapBacked()), we have to account pages
for both types. So nr_activate becomes an array.
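Concretely, the scalar counter turns into a two-slot array indexed by page type (a sketch of the shape of the change; the other reclaim_stat counters are omitted here):

```c
struct reclaim_stat {
	/* ... other reclaim counters ... */
	unsigned nr_activate[2];	/* was: unsigned nr_activate;
					 * [0] = anon, [1] = file cache,
					 * indexed via page_is_file_cache() */
};
```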
Previously we used nr_activate to count PGACTIVATE events, but now we
count them in a local pgactivate variable, since the event counter is
about the number of pages in general, not the sum of hpage_nr_pages().
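The two counters therefore diverge for compound pages. For example, activating one 2 MiB THP (512 base pages on x86-64) bumps the event counter by one but the rotation statistic by 512, per the shrink_page_list() hunk in the diff below:

```c
int type = page_is_file_cache(page);	/* 0 = anon, 1 = file */

SetPageActive(page);
pgactivate++;				/* PGACTIVATE: one event per page,
					 * so +1 even for a 512-page THP */
stat->nr_activate[type] += hpage_nr_pages(page);
					/* recent_rotated feed: +512 for
					 * that THP, +1 for a base page */
```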
Link: http://lkml.kernel.org/r/155290127956.31489.3393586616054413298.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 15 |
1 file changed, 7 insertions(+), 8 deletions(-)
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fd9de504e516..e6913e68db2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1107,6 +1107,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	unsigned nr_reclaimed = 0;
+	unsigned pgactivate = 0;
 
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
@@ -1466,8 +1467,10 @@ activate_locked:
 			try_to_free_swap(page);
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
+			int type = page_is_file_cache(page);
 			SetPageActive(page);
-			stat->nr_activate++;
+			pgactivate++;
+			stat->nr_activate[type] += hpage_nr_pages(page);
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
@@ -1482,7 +1485,7 @@ keep:
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
-	count_vm_events(PGACTIVATE, stat->nr_activate);
+	count_vm_events(PGACTIVATE, pgactivate);
 
 	return nr_reclaimed;
 }
@@ -1807,7 +1810,6 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
 static noinline_for_stack void
 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
-	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	LIST_HEAD(pages_to_free);
 
@@ -1833,11 +1835,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		lru = page_lru(page);
 		add_page_to_lru_list(page, lruvec, lru);
 
-		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
-			int numpages = hpage_nr_pages(page);
-			reclaim_stat->recent_rotated[file] += numpages;
-		}
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
@@ -1945,6 +1942,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 			count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT,
 					   nr_reclaimed);
 	}
+	reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
+	reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
 
 	putback_inactive_pages(lruvec, &page_list);
```
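With this change, putback_inactive_pages() no longer touches lruvec->reclaim_stat at all, which removes one of the remaining differences from move_active_pages_to_lru() and sets up the later patches in this series to merge the two putback paths into a single function.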