author    | Konstantin Khlebnikov <khlebnikov@yandex-team.ru> | 2015-04-15 00:45:30 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org>    | 2015-04-15 01:49:01 +0200
commit    | d1bfcdb8ce0ea6eb6034daa7ff02548e0bc9c21b (patch)
tree      | af4e212c85863c7d58d3366e0ba18ac07fbdd510
parent    | page_writeback: clean up mess around cancel_dirty_page() (diff)
download  | linux-d1bfcdb8ce0ea6eb6034daa7ff02548e0bc9c21b.tar.xz
          | linux-d1bfcdb8ce0ea6eb6034daa7ff02548e0bc9c21b.zip
mm: hide per-cpu lists in output of show_mem()
This makes show_mem() much less verbose on huge machines. Instead of a
huge and almost useless dump of counters for each per-zone per-cpu list,
this patch prints the sum of these counters for each zone (free_pcp) and
the size of the per-cpu list for the current cpu (local_pcp).
The filter flag SHOW_MEM_PERCPU_LISTS reverts to the old verbose mode.
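
In kernel terms, the new summary boils down to the aggregation below; this is a minimal sketch lifted from the hunks that follow, where `zone` stands for one populated zone:

	unsigned long free_pcp = 0;
	unsigned int local_pcp;
	int cpu;

	/* free_pcp: sum of this zone's per-cpu free list counts over all online CPUs */
	for_each_online_cpu(cpu)
		free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;

	/* local_pcp: size of the list on the CPU running show_free_areas() only */
	local_pcp = this_cpu_read(zone->pageset->pcp.count);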
[akpm@linux-foundation.org: update show_free_areas comment]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/mm.h |  1
-rw-r--r-- | mm/page_alloc.c    | 39
2 files changed, 31 insertions, 9 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6571dd78e984..9c21b42d07bf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1126,6 +1126,7 @@ extern void pagefault_out_of_memory(void);
  * various contexts.
  */
 #define SHOW_MEM_FILTER_NODES	(0x0001u)	/* disallowed nodes */
+#define SHOW_MEM_PERCPU_LISTS	(0x0002u)	/* per-zone per-cpu */
 
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6dfa5b24cc79..eab8e2018a46 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3251,25 +3251,37 @@ static void show_migration_types(unsigned char type)
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
- * Suppresses nodes that are not allowed by current's cpuset if
- * SHOW_MEM_FILTER_NODES is passed.
+ *
+ * Bits in @filter:
+ * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
+ *   cpuset.
+ * SHOW_MEM_PERCPU_LISTS: display full per-node per-cpu pcp lists
  */
 void show_free_areas(unsigned int filter)
 {
+	unsigned long free_pcp = 0;
 	int cpu;
 	struct zone *zone;
 
 	for_each_populated_zone(zone) {
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
-		show_node(zone);
-		printk("%s per-cpu:\n", zone->name);
+
+		if (filter & SHOW_MEM_PERCPU_LISTS) {
+			show_node(zone);
+			printk("%s per-cpu:\n", zone->name);
+		}
 
 		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;
 
 			pageset = per_cpu_ptr(zone->pageset, cpu);
 
+			free_pcp += pageset->pcp.count;
+
+			if (!(filter & SHOW_MEM_PERCPU_LISTS))
+				continue;
+
 			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
 			       cpu, pageset->pcp.high,
 			       pageset->pcp.batch, pageset->pcp.count);
@@ -3278,11 +3290,10 @@ void show_free_areas(unsigned int filter)
 
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
-		" unevictable:%lu"
-		" dirty:%lu writeback:%lu unstable:%lu\n"
-		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
-		" free_cma:%lu\n",
+		" free:%lu free_pcp:%lu free_cma:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_ISOLATED_ANON),
@@ -3293,13 +3304,14 @@ void show_free_areas(unsigned int filter)
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
-		global_page_state(NR_FREE_PAGES),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
 		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE),
+		global_page_state(NR_FREE_PAGES),
+		free_pcp,
 		global_page_state(NR_FREE_CMA_PAGES));
 
 	for_each_populated_zone(zone) {
@@ -3307,6 +3319,11 @@ void show_free_areas(unsigned int filter)
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
+
+		free_pcp = 0;
+		for_each_online_cpu(cpu)
+			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
+
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -3333,6 +3350,8 @@ void show_free_areas(unsigned int filter)
 			" pagetables:%lukB"
 			" unstable:%lukB"
 			" bounce:%lukB"
+			" free_pcp:%lukB"
+			" local_pcp:%ukB"
 			" free_cma:%lukB"
 			" writeback_tmp:%lukB"
 			" pages_scanned:%lu"
@@ -3364,6 +3383,8 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 			K(zone_page_state(zone, NR_BOUNCE)),
+			K(free_pcp),
+			K(this_cpu_read(zone->pageset->pcp.count)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			K(zone_page_state(zone, NR_PAGES_SCANNED)),
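
Since show_free_areas() takes the filter bits directly (see the include/linux/mm.h hunk above), a call site that still wants the full per-cpu dump would pass the new bit explicitly. A hypothetical example, not taken from this patch:

	show_free_areas(SHOW_MEM_PERCPU_LISTS);	/* old verbose per-zone per-cpu lists */
	show_free_areas(0);			/* new compact free_pcp/local_pcp summary */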