author     David Hildenbrand <david@redhat.com>             2020-08-07 08:25:27 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-08-07 20:33:28 +0200
commit     0a18e60788d6a39436e8b5e91001b790043fc29c (patch)
tree       7afd333b77453c7bf2b16cfde59b6eb90a2a77c0
parent     mm, page_alloc: skip ->watermark_boost for atomic order-0 allocations (diff)
download   linux-0a18e60788d6a39436e8b5e91001b790043fc29c.tar.xz
           linux-0a18e60788d6a39436e8b5e91001b790043fc29c.zip
mm: remove vm_total_pages
The global variable "vm_total_pages" is a relic from older days. There is
only a single user that reads the variable - build_all_zonelists() - and
the first thing it does is update it.
Use a local variable in build_all_zonelists() instead and remove the
global variable.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Minchan Kim <minchan@kernel.org>
Link: http://lkml.kernel.org/r/20200619132410.23859-2-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/swap.h | 1
-rw-r--r-- | mm/memory_hotplug.c  | 3
-rw-r--r-- | mm/page-writeback.c  | 6
-rw-r--r-- | mm/page_alloc.c      | 2
-rw-r--r-- | mm/vmscan.c          | 5
5 files changed, 4 insertions, 13 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 5b3216ba39a9..4ab236692e05 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -372,7 +372,6 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
-extern unsigned long vm_total_pages;
 
 extern unsigned long reclaim_pages(struct list_head *page_list);
 #ifdef CONFIG_NUMA
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index da374cd3d45b..be3c62e3fb95 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -844,8 +844,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
 	kswapd_run(nid);
 	kcompactd_run(nid);
 
-	vm_total_pages = nr_free_pagecache_pages();
-
 	writeback_set_ratelimit();
 
 	memory_notify(MEM_ONLINE, &arg);
@@ -1595,7 +1593,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 		kcompactd_stop(node);
 	}
 
-	vm_total_pages = nr_free_pagecache_pages();
 	writeback_set_ratelimit();
 
 	memory_notify(MEM_OFFLINE, &arg);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 28b3e7a67565..4e4ddd67b71e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2076,13 +2076,11 @@ static int page_writeback_cpu_online(unsigned int cpu)
  * Called early on to tune the page writeback dirty limits.
  *
  * We used to scale dirty pages according to how total memory
- * related to pages that could be allocated for buffers (by
- * comparing nr_free_buffer_pages() to vm_total_pages.
+ * related to pages that could be allocated for buffers.
  *
  * However, that was when we used "dirty_ratio" to scale with
  * all memory, and we don't do that any more. "dirty_ratio"
- * is now applied to total non-HIGHPAGE memory (by subtracting
- * totalhigh_pages from vm_total_pages), and as such we can't
+ * is now applied to total non-HIGHPAGE memory, and as such we can't
  * get into the old insane situation any more where we had
  * large amounts of dirty pages compared to a small amount of
  * non-HIGHMEM memory.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0fb5c97ac94c..20184e2a8cfe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5912,6 +5912,8 @@ build_all_zonelists_init(void)
  */
 void __ref build_all_zonelists(pg_data_t *pgdat)
 {
+	unsigned long vm_total_pages;
+
 	if (system_state == SYSTEM_BOOTING) {
 		build_all_zonelists_init();
 	} else {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 23156c252e0a..d4d7cd1d24c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -170,11 +170,6 @@ struct scan_control {
  * From 0 .. 200. Higher means more swappy.
  */
 int vm_swappiness = 60;
-/*
- * The total number of pages which are beyond the high watermark within all
- * zones.
- */
-unsigned long vm_total_pages;
 
 static void set_task_reclaim_state(struct task_struct *task,
 				   struct reclaim_state *rs)
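
For context, here is a minimal sketch of the shape build_all_zonelists() takes after this patch, pieced together from the commit message and the mm/page_alloc.c hunk above. Only the declaration and the boot-time check come from the hunk; the rest of the body is elided or assumed, so read it as an illustration of the pattern rather than the verbatim kernel function:

	/* Illustrative sketch only -- not quoted verbatim from mm/page_alloc.c. */
	void __ref build_all_zonelists(pg_data_t *pgdat)
	{
		unsigned long vm_total_pages;	/* local now; the global definition is gone */

		if (system_state == SYSTEM_BOOTING) {
			build_all_zonelists_init();
		} else {
			/* runtime/hotplug rebuild path, elided */
		}

		/*
		 * Pre-existing code the commit message refers to: the only
		 * reader of the old global recomputed it before use anyway,
		 * so a local variable is sufficient.
		 */
		vm_total_pages = nr_free_pagecache_pages();

		/* all remaining uses of vm_total_pages stay inside this function */
	}

Since the online/offline paths in mm/memory_hotplug.c only ever wrote the global and never read it back, dropping the extern declaration from include/linux/swap.h and the definition from mm/vmscan.c leaves no other user behind; the value now simply lives on the stack for the duration of the zonelist rebuild.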