| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-04-21 21:24:57 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-21 22:41:51 +0200 |
| commit | 2e2e425989080cc534fc0fca154cae515f971cf5 (patch) | |
| tree | c4acbd403164d1d210e3bc274b7d93487071153e /mm | |
| parent | edac: ppc mpc85xx fix mc err detect (diff) | |
vmscan,memcg: reintroduce sc->may_swap
Commit a6dc60f8975ad96d162915e07703a4439c80dcf0 ("vmscan: rename
sc.may_swap to may_unmap") removed the may_swap flag, but memcg had been
using it as a "do we need to use swap?" flag, as the name indicates.
With the current implementation, memcg cannot reclaim mapped file
caches when mem+swap hits the limit.
Re-introduce the may_swap flag and handle it in get_scan_ratio(). This
patch doesn't influence any scan_control users other than memcg.
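In effect, get_scan_ratio() now treats a reclaim pass with may_swap cleared the same as a system with no swap space: anon pages are skipped entirely and only file pages are scanned. Below is a minimal userspace sketch (not kernel code) of that decision; the standalone harness, the nr_swap_pages parameter, and the 50/50 placeholder split are illustrative assumptions — only the `!sc->may_swap || nr_swap_pages <= 0` test mirrors the patch below.

```c
/*
 * Userspace sketch of the anon/file balancing check this patch adds to
 * get_scan_ratio(): if the caller clears may_swap (memcg reclaim with
 * noswap) or there is no swap space, scan only file pages.
 */
#include <stdio.h>

struct scan_control {
	int may_unmap;	/* can mapped pages be reclaimed? */
	int may_swap;	/* can pages be swapped as part of reclaim? */
};

static void get_scan_ratio(const struct scan_control *sc,
			   long nr_swap_pages, unsigned long percent[2])
{
	if (!sc->may_swap || nr_swap_pages <= 0) {
		percent[0] = 0;		/* anon */
		percent[1] = 100;	/* file */
		return;
	}
	/*
	 * The real function derives the split from recent_rotated/
	 * recent_scanned statistics and swappiness; a fixed 50/50 split
	 * stands in for that here.
	 */
	percent[0] = 50;
	percent[1] = 50;
}

int main(void)
{
	struct scan_control memcg_noswap = { .may_unmap = 1, .may_swap = 0 };
	unsigned long percent[2];

	get_scan_ratio(&memcg_noswap, 1024, percent);
	printf("anon %lu%% file %lu%%\n", percent[0], percent[1]);
	return 0;
}
```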
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99155b7b8123..eac9577941f9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
 	/* Can mapped pages be reclaimed? */
 	int may_unmap;
 
+	/* Can pages be swapped as part of reclaim? */
+	int may_swap;
+
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -1380,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
-	if (nr_swap_pages <= 0) {
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
 		percent[0] = 0;
 		percent[1] = 100;
 		return;
@@ -1697,6 +1700,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
@@ -1717,6 +1721,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
+		.may_swap = !noswap,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
@@ -1726,9 +1731,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	};
 	struct zonelist *zonelist;
 
-	if (noswap)
-		sc.may_unmap = 0;
-
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1767,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2298,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_swap = 1,
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,