author | Mel Gorman <mel@csn.ul.ie> | 2008-04-28 11:12:17 +0200
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 17:58:18 +0200
commit | dd1a239f6f2d4d3eedd318583ec319aa145b324c | (patch)
tree | aff4224c96b5e2e67588c3946858a724863eeaf9 | /mm/vmscan.c
parent | mm: use two zonelist that are filtered by GFP mask | (diff)
mm: have zonelist contains structs with both a zone pointer and zone_idx
Filtering zonelists requires very frequent use of zone_idx(). This is costly:
it involves a lookup of another structure and a subtraction operation. Since
the zone index is needed so often, it should be quickly accessible. The node
index could also be stored here if accessing zone->node turns out to be
significant, which may be the case on workloads where nodemasks are heavily
used.
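For context, zone_idx() in include/linux/mmzone.h is a pointer-difference macro; computing it means following zone->zone_pgdat and then subtracting against that node's node_zones array, which is the lookup-plus-subtraction cost described above:

```c
/* include/linux/mmzone.h: the index is recomputed on every use. */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
```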
This patch introduces a struct zoneref to store a zone pointer and a zone
index. The zonelist then consists of an array of these struct zonerefs, which
are looked up as necessary. Helpers are provided for accessing the zone index
as well as the node index.
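The include/linux/mmzone.h side of the change is not part of the mm/vmscan.c diff shown below; the new structure and accessors look roughly like this (a sketch of the added API, not the exact hunk):

```c
/* One zonelist entry: the zone plus its cached zone_idx(). */
struct zoneref {
	struct zone *zone;	/* pointer to the actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone), cached */
};

/* Accessors so callers avoid recomputing the index or chasing pointers. */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);	/* 0 when !CONFIG_NUMA */
}
```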
[kamezawa.hiroyu@jp.fujitsu.com: Suggested struct zoneref instead of embedding information in pointers]
[hugh@veritas.com: mm-have-zonelist: fix memcg ooms]
[hugh@veritas.com: just return do_try_to_free_pages]
[hugh@veritas.com: do_try_to_free_pages gfp_mask redundant]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 22
1 file changed, 10 insertions(+), 12 deletions(-)
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0515b8f44894..eceac9f9032f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1251,7 +1251,7 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	unsigned long nr_reclaimed = 0;
-	struct zone **z;
+	struct zoneref *z;
 	struct zone *zone;
 
 	sc->all_unreclaimable = 1;
@@ -1301,7 +1301,7 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
  * allocation attempt will fail.
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					gfp_t gfp_mask, struct scan_control *sc)
+					struct scan_control *sc)
 {
 	int priority;
 	int ret = 0;
@@ -1309,9 +1309,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
-	struct zone **z;
+	struct zoneref *z;
 	struct zone *zone;
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
 	if (scan_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
@@ -1339,7 +1339,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * over limit cgroups
 		 */
 		if (scan_global_lru(sc)) {
-			shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				nr_reclaimed += reclaim_state->reclaimed_slab;
 				reclaim_state->reclaimed_slab = 0;
@@ -1410,7 +1410,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.isolate_pages = isolate_pages_global,
 	};
 
-	return do_try_to_free_pages(zonelist, gfp_mask, &sc);
+	return do_try_to_free_pages(zonelist, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1419,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 						gfp_t gfp_mask)
 {
 	struct scan_control sc = {
-		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1429,12 +1428,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	struct zonelist *zonelist;
-	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
-	zonelist = &NODE_DATA(numa_node_id())->node_zonelists[target_zone];
-	if (do_try_to_free_pages(zonelist, sc.gfp_mask, &sc))
-		return 1;
-	return 0;
+	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+	return do_try_to_free_pages(zonelist, &sc);
 }
 #endif
```
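Because the vmscan call sites iterate via the for_each_zone_zonelist() helper from this patch series, the type change above is largely mechanical; a reclaim loop over a filtered zonelist reads roughly as follows, assuming sc and zonelist are in scope as in shrink_zones() (an illustrative sketch, not a hunk from this commit):

```c
	struct zoneref *z;	/* was: struct zone **z */
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	/* Walks the zoneref array, filtering on the cached zone_idx. */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/* per-zone reclaim work, unchanged by this patch */
	}
```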