author	Mel Gorman <mgorman@techsingularity.net>	2016-07-29 00:47:29 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 01:07:41 +0200
commit	bb4cc2bea6df7854d629bff114ca03237cc718d6
tree	5f164e808a79a8e84029a7d2a2772cf8c02219b2
parent	mm: add per-zone lru list stat
mm, vmscan: remove highmem_file_pages
With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant, so remove it.
[mgorman@techsingularity.net: wrong stat is being accumulated in highmem_dirtyable_memory]
Link: http://lkml.kernel.org/r/20160725092324.GM10438@techsingularity.net
Link: http://lkml.kernel.org/r/1469110261-7365-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mm_inline.h	17
-rw-r--r--	mm/page-writeback.c	12
2 files changed, 4 insertions, 25 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 9cc130f5feb2..71613e8a720f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,22 +4,6 @@
 #include <linux/huge_mm.h>
 #include <linux/swap.h>
 
-#ifdef CONFIG_HIGHMEM
-extern atomic_t highmem_file_pages;
-
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-							int nr_pages)
-{
-	if (is_highmem_idx(zid) && is_file_lru(lru))
-		atomic_add(nr_pages, &highmem_file_pages);
-}
-#else
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
-							int nr_pages)
-{
-}
-#endif
-
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -47,7 +31,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
 	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
 	__mod_zone_page_state(&pgdat->node_zones[zid],
 				NR_ZONE_LRU_BASE + lru, nr_pages);
-	acct_highmem_file_pages(zid, lru, nr_pages);
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 573d138fa7a5..7b5920a3500f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 	return nr_pages;
 }
 
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
 	int node;
-	unsigned long x;
+	unsigned long x = 0;
 	int i;
-	unsigned long dirtyable = 0;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 			/* watch for underflows */
 			nr_pages -= min(nr_pages, high_wmark_pages(z));
-			dirtyable += nr_pages;
+			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+			x += nr_pages;
 		}
 	}
 
-	x = dirtyable + atomic_read(&highmem_file_pages);
-
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below
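To illustrate the reasoning in the commit message: once the parent commit
maintains NR_ZONE_INACTIVE_FILE and NR_ZONE_ACTIVE_FILE per zone, the count
that highmem_file_pages used to track can be recomputed on demand by walking
the highmem zones, which is exactly what the rewritten loop in
highmem_dirtyable_memory() does inline. Below is a minimal sketch of that
computation pulled out into a standalone helper; the function name
highmem_file_page_count() is hypothetical and not part of the commit, while
the helpers it calls (for_each_node_state(), NODE_DATA(), populated_zone(),
is_highmem_idx(), zone_page_state()) are real kernel APIs.

#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper, not part of this commit: with per-zone LRU
 * counters available, the total number of highmem file pages is just
 * a sum over the populated highmem zones, so no separate global
 * atomic counter (and no hook in __update_lru_size()) is needed.
 */
static unsigned long highmem_file_page_count(void)
{
	unsigned long nr = 0;
	int node, i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z = &NODE_DATA(node)->node_zones[i];

			/* skip non-highmem and empty zones */
			if (!is_highmem_idx(i) || !populated_zone(z))
				continue;

			nr += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
		}
	}

	return nr;
}

This mirrors the structure of the rewritten loop in
highmem_dirtyable_memory(), which additionally folds in NR_FREE_PAGES minus
the zone's high watermark before accumulating into x.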