author	Konstantin Khlebnikov <khlebnikov@openvz.org>	2012-05-30 00:07:02 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-30 01:22:27 +0200
commit	90bdcfafdc660b359018262f0f8630d100e28760
tree	f88bc1668a2f5dc9558a5498359ff295d90d792c
parent	mm/vmscan: push lruvec pointer into get_scan_count()
mm/vmscan: push lruvec pointer into should_continue_reclaim()
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/vmscan.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b139ad7f396e..1d251b5b0a06 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1708,14 +1708,13 @@ static bool in_reclaim_compaction(struct scan_control *sc)
  * calls try_to_compact_zone() that it will have enough free pages to succeed.
  * It will give up earlier than that if there is difficulty reclaiming pages.
  */
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
 					unsigned long nr_reclaimed,
 					unsigned long nr_scanned,
 					struct scan_control *sc)
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
-	struct lruvec *lruvec;

 	/* If not in reclaim/compaction mode, stop */
 	if (!in_reclaim_compaction(sc))
@@ -1748,7 +1747,6 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
-	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	pages_for_compaction = (2UL << sc->order);
 	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
@@ -1759,7 +1757,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 		return true;

 	/* If compaction would go ahead or the allocation would succeed, stop */
-	switch (compaction_suitable(mz->zone, sc->order)) {
+	switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
 	case COMPACT_PARTIAL:
 	case COMPACT_CONTINUE:
 		return false;
@@ -1826,7 +1824,7 @@ restart:
 						sc, LRU_ACTIVE_ANON);

 	/* reclaim/compaction might need reclaim to continue */
-	if (should_continue_reclaim(mz, nr_reclaimed,
+	if (should_continue_reclaim(lruvec, nr_reclaimed,
 					sc->nr_scanned - nr_scanned, sc))
 		goto restart;
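Below is a minimal, self-contained C sketch of the pattern this series converges on: the caller already holds a struct lruvec, so the helper takes that pointer directly and recovers the zone through lruvec_zone() instead of carrying a separate mem_cgroup_zone pair. All types, fields, and the reclaim condition here are simplified stand-ins for illustration, not the real mm/vmscan.c definitions.

#include <stdbool.h>
#include <stdio.h>

struct zone {
	unsigned long free_pages;
};

/* Simplified stand-in for the kernel's struct lruvec: it knows its zone. */
struct lruvec {
	struct zone *zone;
	unsigned long inactive_file;
};

static struct zone *lruvec_zone(struct lruvec *lruvec)
{
	return lruvec->zone;
}

/*
 * After the patch the callee receives the lruvec its caller already has,
 * rather than a (zone, memcg) container, and derives the zone on demand.
 * The decision logic below is deliberately simplified.
 */
static bool should_continue_reclaim(struct lruvec *lruvec,
				    unsigned long nr_reclaimed,
				    unsigned long pages_for_compaction)
{
	if (nr_reclaimed < pages_for_compaction &&
	    lruvec->inactive_file > pages_for_compaction)
		return true;

	/* zone-level information now comes through lruvec_zone() */
	return lruvec_zone(lruvec)->free_pages < pages_for_compaction;
}

int main(void)
{
	struct zone zone = { .free_pages = 8 };
	struct lruvec lruvec = { .zone = &zone, .inactive_file = 64 };

	printf("continue reclaim: %d\n",
	       (int)should_continue_reclaim(&lruvec, 4, 16));
	return 0;
}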