author		Jiang Liu <liuj97@gmail.com>			2013-02-23 01:33:52 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-24 02:50:14 +0100
commit		b40da04946aa7b603b2aa4dd479f83b2c9090d96 (patch)
tree		e4c93fd9375f9c90449ef37f4456fbd3c5a7b6b6 /mm/vmscan.c
parent		mm/memblock.c: use CONFIG_HAVE_MEMBLOCK_NODE_MAP to protect movablecore_map i... (diff)
mm: use zone->present_pages instead of zone->managed_pages where appropriate
Now we have zone->managed_pages for "pages managed by the buddy system
in the zone", so replace zone->present_pages with zone->managed_pages
wherever what the caller really wants is the number of allocatable pages.
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Maciej Rutecki <maciej.rutecki@gmail.com>
Cc: Chris Clayton <chris2553@googlemail.com>
Cc: "Rafael J . Wysocki" <rjw@sisk.pl>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
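
[Editorial note] The rule the log applies rests on the relationship between
the zone page counters that the managed_pages series documented (roughly, in
the include/linux/mmzone.h comment of this era): spanned_pages covers the
whole PFN range, present_pages excludes holes, and managed_pages further
excludes bootmem-reserved pages. A minimal sketch of that relationship and of
the rule of thumb follows; zone_allocatable_pages() is a hypothetical helper
for illustration, not part of this patch:

    /*
     * Illustrative sketch only -- not from this patch. Relationship
     * between the zone counters, per the include/linux/mmzone.h
     * comment of this era:
     *
     *   spanned_pages = zone_end_pfn - zone_start_pfn;
     *   present_pages = spanned_pages - absent_pages(pages in holes);
     *   managed_pages = present_pages - reserved_pages;
     *
     * zone_allocatable_pages() is a hypothetical helper spelling out
     * the rule this commit applies: sizing decisions about what the
     * buddy allocator can actually hand out should read managed_pages,
     * not present_pages.
     */
    static inline unsigned long zone_allocatable_pages(struct zone *zone)
    {
            return zone->managed_pages;     /* not zone->present_pages */
    }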
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4093b99044f6..8fde2fc223d9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2010,7 +2010,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * a reasonable chance of completing and allocating the page
 	 */
 	balance_gap = min(low_wmark_pages(zone),
-		(zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 			KSWAPD_ZONE_BALANCE_GAP_RATIO);
 	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
@@ -2525,7 +2525,7 @@ static bool zone_balanced(struct zone *zone, int order,
  */
 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 {
-	unsigned long present_pages = 0;
+	unsigned long managed_pages = 0;
 	unsigned long balanced_pages = 0;
 	int i;
 
@@ -2536,7 +2536,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		if (!populated_zone(zone))
 			continue;
 
-		present_pages += zone->present_pages;
+		managed_pages += zone->managed_pages;
 
 		/*
 		 * A special case here:
@@ -2546,18 +2546,18 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 		 * they must be considered balanced here as well!
 		 */
 		if (zone->all_unreclaimable) {
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 			continue;
 		}
 
 		if (zone_balanced(zone, order, 0, i))
-			balanced_pages += zone->present_pages;
+			balanced_pages += zone->managed_pages;
 		else if (!order)
 			return false;
 	}
 
 	if (order)
-		return balanced_pages >= (present_pages >> 2);
+		return balanced_pages >= (managed_pages >> 2);
 	else
 		return true;
 }
@@ -2745,7 +2745,7 @@ loop_again:
 			 * of the zone, whichever is smaller.
 			 */
 			balance_gap = min(low_wmark_pages(zone),
-				(zone->present_pages +
+				(zone->managed_pages +
 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			/*
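
[Editorial note] To make the two heuristics in these hunks concrete: the
balance_gap expression is a ceiling division that reserves roughly
1/KSWAPD_ZONE_BALANCE_GAP_RATIO of the zone (the ratio was 100 in this era,
so about 1%), capped at the low watermark by min(); and pgdat_balanced() for
order > 0 declares the node balanced once zones holding at least a quarter of
the node's managed pages pass zone_balanced(). A standalone sketch with
assumed numbers (262144 managed pages, i.e. a 1 GiB zone of 4 KiB pages, and
a made-up low watermark of 4000 pages):

    #include <stdio.h>

    #define KSWAPD_ZONE_BALANCE_GAP_RATIO 100  /* value in this era's mm/vmscan.c */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Assumed numbers: 1 GiB zone of 4 KiB pages, made-up watermark. */
            unsigned long managed_pages = 262144;
            unsigned long low_wmark = 4000;

            /*
             * (x + R - 1) / R is ceiling division: about 1% of the zone,
             * but never more than the low watermark.
             */
            unsigned long balance_gap = min_ul(low_wmark,
                    (managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                            KSWAPD_ZONE_BALANCE_GAP_RATIO);

            /*
             * pgdat_balanced() for order > 0: the node counts as balanced
             * once balanced zones hold >= 25% of its managed pages.
             */
            unsigned long threshold = managed_pages >> 2;

            printf("balance_gap = %lu pages\n", balance_gap);        /* 2622 */
            printf("balanced threshold = %lu pages\n", threshold);   /* 65536 */
            return 0;
    }

With these assumed inputs the 1% term (2622 pages, about 10 MiB) is smaller
than the low watermark, so it wins the min(); on a small zone the watermark
would cap the gap instead.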