| author | Mel Gorman <mgorman@techsingularity.net> | 2019-12-01 02:55:11 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-01 21:59:06 +0100 |
| commit | cb1ef534ceb745f237eafb72ff5555d74fa49235 | |
| tree | 7fc011091f4a2a0f8449180976978b202128a3ec /mm | |
| parent | mm/page_alloc: add alloc_contig_pages() | |
| download | linux-cb1ef534ceb745f237eafb72ff5555d74fa49235.tar.xz, linux-cb1ef534ceb745f237eafb72ff5555d74fa49235.zip | |
mm, pcp: share common code between memory hotplug and percpu sysctl handler
Both the percpu_pagelist_fraction sysctl handler and memory hotplug have
a common requirement of updating the pcpu page allocation batch and high
values. Split the relevant helper to share common code.
No functional change.
Link: http://lkml.kernel.org/r/20191021094808.28824-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Qian Cai <cai@lca.pw>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 23 |
1 file changed, 12 insertions, 11 deletions
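For quick orientation before the diff: the patch factors the per-CPU update loop into a new static helper, __zone_pcp_update(), which both the percpu_pagelist_fraction sysctl handler and the hotplug-facing zone_pcp_update() then call. Below is a condensed sketch of the post-patch shape, taken from the diff that follows; it is not a standalone translation unit and relies on mm/page_alloc.c internals such as pageset_set_high_and_batch() and pcp_batch_high_lock.

```c
/* Shared helper: recompute pcp->high and pcp->batch for every possible CPU. */
static void __zone_pcp_update(struct zone *zone)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
}

/*
 * Caller 1: percpu_pagelist_fraction_sysctl_handler(), which already holds
 * pcp_batch_high_lock, now just does:
 *
 *	for_each_populated_zone(zone)
 *		__zone_pcp_update(zone);
 */

/* Caller 2: the memory-hotplug entry point takes the lock itself. */
void __meminit zone_pcp_update(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_pcp_update(zone);
	mutex_unlock(&pcp_batch_high_lock);
}
```

Both paths serialize on pcp_batch_high_lock and then run the same per-CPU recalculation, so after this change the loop lives in exactly one place.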
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e47398ba498..7c3bee1e98ec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7988,6 +7988,15 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
+static void __zone_pcp_update(struct zone *zone)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		pageset_set_high_and_batch(zone,
+				per_cpu_ptr(zone->pageset, cpu));
+}
+
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
@@ -8019,13 +8028,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
 		goto out;
 
-	for_each_populated_zone(zone) {
-		unsigned int cpu;
-
-		for_each_possible_cpu(cpu)
-			pageset_set_high_and_batch(zone,
-				per_cpu_ptr(zone->pageset, cpu));
-	}
+	for_each_populated_zone(zone)
+		__zone_pcp_update(zone);
 out:
 	mutex_unlock(&pcp_batch_high_lock);
 	return ret;
@@ -8624,11 +8628,8 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
  */
 void __meminit zone_pcp_update(struct zone *zone)
 {
-	unsigned cpu;
 	mutex_lock(&pcp_batch_high_lock);
-	for_each_possible_cpu(cpu)
-		pageset_set_high_and_batch(zone,
-			per_cpu_ptr(zone->pageset, cpu));
+	__zone_pcp_update(zone);
 	mutex_unlock(&pcp_batch_high_lock);
 }
 