author     Michal Hocko <mhocko@suse.com>  2016-03-15 22:57:01 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 00:55:16 +0100
commit     0db2cb8da89d991762ec2aece45e55ceaee34664 (patch)
tree       b8821119e212ee45266cc73ba36a04a84247b6f6 /mm
parent     mm/madvise: update comment on sys_madvise() (diff)
mm, vmscan: make zone_reclaimable_pages more precise
zone_reclaimable_pages() is used in should_reclaim_retry() to calculate the target for the watermark check, so precise numbers are important for a correct decision.

zone_reclaimable_pages() currently uses zone_page_state(), which can return stale data because per-cpu diffs may not have been synced yet (the last vmstat_update might have run 1s in the past).

Use zone_page_state_snapshot() in zone_reclaimable_pages() instead. None of the current callers is on a hot path where getting the precise value (which involves a per-cpu iteration) would cause unreasonable overhead.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Suggested-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
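For context, the two accessors differ in how much of the counter state they see: zone_page_state() reads only the zone-wide counter, while zone_page_state_snapshot() also folds in the per-cpu deltas that vmstat_update() has not flushed yet, at the cost of iterating over online CPUs. Below is a simplified sketch of the snapshot variant, modelled on include/linux/vmstat.h of this kernel era (abbreviated, not a verbatim copy):

static inline unsigned long zone_page_state_snapshot(struct zone *zone,
						enum zone_stat_item item)
{
	/* Start from the zone-wide counter, same as zone_page_state(). */
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	int cpu;

	/* Fold in per-cpu diffs that have not been synced back yet. */
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

The per-cpu walk is what makes the snapshot variant more precise and also more expensive, which is why the changelog notes that none of the current callers is on a hot path.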
Diffstat (limited to '')
-rw-r--r--  mm/vmscan.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index db5722a07d4f..039f08d369a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -195,21 +195,21 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	unsigned long nr;
 
-	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-	     zone_page_state(zone, NR_INACTIVE_FILE) +
-	     zone_page_state(zone, NR_ISOLATED_FILE);
+	nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+	     zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+	     zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
 
 	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-		      zone_page_state(zone, NR_INACTIVE_ANON) +
-		      zone_page_state(zone, NR_ISOLATED_ANON);
+		nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) +
+		      zone_page_state_snapshot(zone, NR_INACTIVE_ANON) +
+		      zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
 
 	return nr;
 }
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone_page_state(zone, NR_PAGES_SCANNED) <
+	return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) <
 		zone_reclaimable_pages(zone) * 6;
 }
 