author	Michal Hocko <mhocko@suse.com>	2017-02-23 00:46:13 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-23 01:41:30 +0100
commit	6d23f8a5d432337aa2590ea8fd5eee8b0bc28eee (patch)
tree	8c52582920cc940cd6f9a6657e2357685db2828f /arch/tile
parent	mm, page_alloc: warn_alloc print nodemask (diff)
arch, mm: remove arch specific show_mem
We have had a generic implementation for quite some time already.  If there
is any arch-specific information to be printed, we should add a callback
called from the generic code rather than duplicate the whole show_mem.  The
current code has resulted in code duplication and output divergence, which
is both confusing and adds maintenance costs.  Let's just get rid of this
mess.

Link: http://lkml.kernel.org/r/20170117091543.25850-4-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn> [UniCore32]
Acked-by: Helge Deller <deller@gmx.de> [for parisc]
Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
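As a rough illustration of the callback approach the message argues for, here is a minimal sketch.  It assumes a hypothetical arch_show_mem_extra() hook; that name is illustrative only and not an existing kernel interface, while show_mem() and show_free_areas() follow the single-argument signatures visible in this patch.  The two definitions would live in different files (generic lib code vs. the arch tree), not one translation unit.

/* In generic code, e.g. lib/show_mem.c (sketch, hypothetical hook): */
#include <linux/mm.h>
#include <linux/printk.h>

/* Weak no-op default so architectures without extra output need no code. */
void __weak arch_show_mem_extra(unsigned int filter)
{
}

void show_mem(unsigned int filter)
{
	show_free_areas(filter);	/* shared, generic summary */
	arch_show_mem_extra(filter);	/* optional arch-specific details */
}

/* In an architecture that wants extra detail, e.g. arch/tile/mm/pgtable.c,
 * only the arch-specific bits would remain, overriding the weak symbol: */
void arch_show_mem_extra(unsigned int filter)
{
	pr_err("tile: arch specific counters would be printed here\n");
}

With a hook like this, the per-arch file shrinks to a few lines of genuinely arch-specific output instead of reimplementing the whole summary, which is exactly the duplication the patch removes.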
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/mm/pgtable.c	45
1 file changed, 0 insertions(+), 45 deletions(-)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 7cc6ee7f1a58..492a7361e58e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -36,51 +36,6 @@
#define K(x) ((x) << (PAGE_SHIFT-10))
-/*
- * The normal show_free_areas() is too verbose on Tile, with dozens
- * of processors and often four NUMA zones each with high and lowmem.
- */
-void show_mem(unsigned int filter)
-{
- struct zone *zone;
-
- pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
- (global_node_page_state(NR_ACTIVE_ANON) +
- global_node_page_state(NR_ACTIVE_FILE)),
- (global_node_page_state(NR_INACTIVE_ANON) +
- global_node_page_state(NR_INACTIVE_FILE)),
- global_node_page_state(NR_FILE_DIRTY),
- global_node_page_state(NR_WRITEBACK),
- global_node_page_state(NR_UNSTABLE_NFS),
- global_page_state(NR_FREE_PAGES),
- (global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE)),
- global_node_page_state(NR_FILE_MAPPED),
- global_page_state(NR_PAGETABLE),
- global_page_state(NR_BOUNCE),
- global_node_page_state(NR_FILE_PAGES),
- get_nr_swap_pages());
-
- for_each_zone(zone) {
- unsigned long flags, order, total = 0, largest_order = -1;
-
- if (!populated_zone(zone))
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order < MAX_ORDER; order++) {
- int nr = zone->free_area[order].nr_free;
- total += nr << order;
- if (nr)
- largest_order = order;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- pr_err("Node %d %7s: %lukB (largest %luKb)\n",
- zone_to_nid(zone), zone->name,
- K(total), largest_order ? K(1UL) << largest_order : 0);
- }
-}
-
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*