author	Mel Gorman <mgorman@techsingularity.net>	2016-07-29 00:45:53 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 01:07:41 +0200
commit	79dafcdca31386cfe0fe95b1c7f30a85209af166 (patch)
tree	9954c30bb6048650106c03bbd5e9aaa8e79b9bfd /mm/vmscan.c
parent	mm, vmscan: simplify the logic deciding whether kswapd sleeps (diff)
mm, vmscan: by default have direct reclaim only shrink once per node
Direct reclaim iterates over all zones in the zonelist, shrinking each of them, but this conflicts with node-based reclaim. In the default case, only shrink once per node.

Link: http://lkml.kernel.org/r/1467970510-21195-11-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
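As an illustration of the deduplication pattern this patch applies, here is a minimal user-space sketch. struct node, struct zone, zonelist, and shrink_node_once below are hypothetical stand-ins for the kernel's pg_data_t, struct zone, zonelist iteration, and shrink_node(); they are not kernel interfaces:

/* Minimal sketch of the "shrink once per node" check. In the default
 * node-ordered zonelist, all zones of a node are adjacent, so comparing
 * against the previously shrunk node skips the duplicates. */
#include <stddef.h>
#include <stdio.h>

struct node { int id; };			/* stand-in for pg_data_t */
struct zone { struct node *pgdat; };		/* stand-in for struct zone */

static void shrink_node_once(struct node *n)
{
	printf("shrink node %d\n", n->id);
}

int main(void)
{
	struct node n0 = { 0 }, n1 = { 1 };
	/* Node-ordered (default): each node's zones sit next to each other. */
	struct zone zonelist[] = { { &n0 }, { &n0 }, { &n1 }, { &n1 } };
	struct node *last_pgdat = NULL;

	for (size_t i = 0; i < sizeof(zonelist) / sizeof(zonelist[0]); i++) {
		if (zonelist[i].pgdat == last_pgdat)
			continue;		/* this node was just shrunk */
		last_pgdat = zonelist[i].pgdat;
		shrink_node_once(last_pgdat);
	}
	return 0;	/* prints: shrink node 0, shrink node 1 */
}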
Diffstat
-rw-r--r--	mm/vmscan.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 905c60473126..01fe4708e404 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2552,14 +2552,6 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
* try to reclaim pages from zones which will satisfy the caller's allocation
* request.
*
- * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
- * Because:
- * a) The caller may be trying to free *extra* pages to satisfy a higher-order
- * allocation or
- * b) The target zone may be at high_wmark_pages(zone) but the lower zones
- * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
- * zone defense algorithm.
- *
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
@@ -2571,6 +2563,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
	unsigned long nr_soft_scanned;
	gfp_t orig_mask;
	enum zone_type classzone_idx;
+	pg_data_t *last_pgdat = NULL;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2630,6 +2623,15 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
			}

			/*
+			 * Shrink each node in the zonelist once. If the
+			 * zonelist is ordered by zone (not the default) then a
+			 * node may be shrunk multiple times but in that case
+			 * the user prefers lower zones being preserved.
+			 */
+			if (zone->zone_pgdat == last_pgdat)
+				continue;
+
+			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
@@ -2644,6 +2646,10 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
			/* need some check for avoid more shrink_zone() */
		}

+		/* See comment about same check for global reclaim above */
+		if (zone->zone_pgdat == last_pgdat)
+			continue;
+		last_pgdat = zone->zone_pgdat;
		shrink_node(zone->zone_pgdat, sc, classzone_idx);
	}
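The zone-ordered caveat in the new comment can be sketched the same way (again with hypothetical stand-in types, not kernel code): when zones of different nodes interleave, the adjacency check never matches, so a node is shrunk once per zone it contributes.

/* With the non-default zone-ordered policy, zones of different nodes
 * interleave, so last_pgdat never equals the current node and each node
 * is shrunk once per zone -- the trade-off the comment describes as the
 * user preferring lower zones being preserved. */
#include <stddef.h>
#include <stdio.h>

struct node { int id; };
struct zone { struct node *pgdat; };

int main(void)
{
	struct node n0 = { 0 }, n1 = { 1 };
	/* Zone-ordered (non-default): nodes alternate in the zonelist. */
	struct zone zonelist[] = { { &n0 }, { &n1 }, { &n0 }, { &n1 } };
	struct node *last_pgdat = NULL;

	for (size_t i = 0; i < sizeof(zonelist) / sizeof(zonelist[0]); i++) {
		if (zonelist[i].pgdat == last_pgdat)
			continue;
		last_pgdat = zonelist[i].pgdat;
		printf("shrink node %d\n", last_pgdat->id);
	}
	return 0;	/* prints 0, 1, 0, 1 -- each node shrunk twice */
}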